1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
22 #.apidoc title: Object Relational Mapping
23 #.apidoc module-mods: member-order: bysource
26 Object relational mapping to database (postgresql) module
27 * Hierarchical structure
28 * Constraints consistency, validations
29 * Object meta Data depends on its status
30 * Optimised processing by complex query (multiple actions at once)
31 * Default fields value
32 * Permissions optimisation
* Persistent object: DB postgresql
35 * Multi-level caching system
* 2 different inheritance mechanisms
38 - classicals (varchar, integer, boolean, ...)
39 - relations (one2many, many2one, many2many)
57 from lxml import etree
61 import openerp.netsvc as netsvc
62 import openerp.tools as tools
63 from openerp.tools.config import config
64 from openerp.tools.safe_eval import safe_eval as eval
65 from openerp.tools.translate import _
66 from openerp import SUPERUSER_ID
67 from query import Query
69 # List of etree._Element subclasses that we choose to ignore when parsing XML.
70 from openerp.tools import SKIPPED_ELEMENT_TYPES
72 regex_order = re.compile('^(([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
73 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
def transfer_field_to_modifiers(field, modifiers):
    """Derive the client-side modifiers (invisible/readonly/required) from a
    field descriptor, folding its ``states`` exceptions into
    ``("state", "in"/"not in", [...])`` domains.

    :param dict field: field descriptor (as returned by ``fields_get()``)
    :param dict modifiers: modifiers dict, updated in place
    """
    for attr in ('invisible', 'readonly', 'required'):
        # Per-attribute list of states whose value differs from the default.
        state_exceptions[attr] = []
        default_values[attr] = bool(field.get(attr))
    for state, modifs in (field.get("states",{})).items():
            # modif is an (attribute, value) pair declared for this state.
            if default_values[modif[0]] != modif[1]:
                state_exceptions[modif[0]].append(state)

    for attr, default_value in default_values.items():
        if state_exceptions[attr]:
            # Express the state exceptions as a domain on the record's state.
            modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
            modifiers[attr] = default_value
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
    """Merge a view node's ``attrs``/``states``/boolean attributes into
    ``modifiers`` (updated in place).

    :param node: lxml element from a view architecture
    :param dict modifiers: modifiers dict, updated in place
    :param dict context: evaluation context for dynamic attributes
    :param bool in_tree_view: route 'invisible' to the column-level
        'tree_invisible' key instead (see setup_modifiers)
    """
        # 'attrs' is a python dict literal of dynamic domains; note that
        # eval is safe_eval here (aliased in the module imports).
        modifiers.update(eval(node.get('attrs')))

    if node.get('states'):
        if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
            # TODO combine with AND or OR, use implicit AND for now.
            modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
            modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]

    for a in ('invisible', 'readonly', 'required'):
            v = bool(eval(node.get(a), {'context': context or {}}))
            if in_tree_view and a == 'invisible':
                # Invisible in a tree view has a specific meaning, make it a
                # new key in the modifiers attribute.
                modifiers['tree_invisible'] = v
            elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
                # Don't set the attribute to False if a dynamic value was
                # provided (i.e. a domain from attrs or states).
def simplify_modifiers(modifiers):
    """Prune falsy invisible/readonly/required entries from ``modifiers``
    in place, so the serialized JSON only carries meaningful keys (see the
    '{}' expectations in modifiers_tests)."""
    for a in ('invisible', 'readonly', 'required'):
        if a in modifiers and not modifiers[a]:
def transfer_modifiers_to_node(modifiers, node):
    """Simplify ``modifiers`` and store it on the view node as a JSON
    string under the 'modifiers' attribute."""
        simplify_modifiers(modifiers)
        node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
    """ Processes node attributes and field descriptors to generate
    the ``modifiers`` node attribute and set it on the provided node.

    Alters its first argument in-place.

    :param node: ``field`` node from an OpenERP view
    :type node: lxml.etree._Element
    :param dict field: field descriptor corresponding to the provided node
    :param dict context: execution context used to evaluate node attributes
    :param bool in_tree_view: triggers the ``tree_invisible`` code
                              path (separate from ``invisible``): in
                              tree view there are two levels of
                              invisibility, cell content (a column is
                              present but the cell itself is not
                              displayed) with ``invisible`` and column
                              invisibility (the whole column is
                              hidden) with ``tree_invisible``.
    """
    if field is not None:
        # Field-level defaults first; node attributes may override them below.
        transfer_field_to_modifiers(field, modifiers)
    transfer_node_to_modifiers(
        node, modifiers, context=context, in_tree_view=in_tree_view)
    transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
    """Assert that the modifiers computed for ``what`` — an XML view
    fragment (string) or a field descriptor (dict) — serialize exactly to
    the ``expected`` JSON string."""
    if isinstance(what, basestring):
        node = etree.fromstring(what)
        transfer_node_to_modifiers(node, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)
    elif isinstance(what, dict):
        transfer_field_to_modifiers(what, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)
175 # openerp.osv.orm.modifiers_tests()
def modifiers_tests():
    """Run the modifiers helpers over a table of (input, expected JSON)
    cases: XML view fragments first, then field descriptors (the dicts
    are supposed to be the result of fields_get())."""
    cases = [
        ('<field name="a"/>', '{}'),
        ('<field name="a" invisible="1"/>', '{"invisible": true}'),
        ('<field name="a" readonly="1"/>', '{"readonly": true}'),
        ('<field name="a" required="1"/>', '{"required": true}'),
        ('<field name="a" invisible="0"/>', '{}'),
        ('<field name="a" readonly="0"/>', '{}'),
        ('<field name="a" required="0"/>', '{}'),
        # TODO order is not guaranteed
        ('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}'),
        ('<field name="a" invisible="1" required="0"/>', '{"invisible": true}'),
        ('<field name="a" invisible="0" required="1"/>', '{"required": true}'),
        ("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}'),
        # The dictionary is supposed to be the result of fields_get().
        ({}, '{}'),
        ({"invisible": True}, '{"invisible": true}'),
        ({"invisible": False}, '{}'),
    ]
    for what, expected in cases:
        test_modifiers(what, expected)
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory object is subject to
        some restrictions. This function returns True or False whether
        the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in other not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.
    """
    # Only lowercase letters, digits, '_' and '.' are accepted (see
    # regex_object_name at module level).
    if regex_object_name.match(name) is None:
def raise_on_invalid_object_name(name):
    """Validate ``name`` with check_object_name(); when invalid, log the
    problem on the 'orm' channel and raise except_orm."""
    if check_object_name(name):
        return
    msg = "The _name attribute %s is not valid." % name
    netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
    raise except_orm('ValueError', msg)
224 POSTGRES_CONFDELTYPES = {
def last_day_of_current_month():
    """Return the current month's last day as a 'YYYY-MM-DD' string."""
    today = datetime.date.today()
    day_count = calendar.monthrange(today.year, today.month)[1]
    return time.strftime('%Y-%m-') + str(day_count)
def intersect(la, lb):
    """Return the elements of ``la`` that also occur in ``lb``, keeping
    ``la``'s order (and duplicates)."""
    return filter(lb.__contains__, la)
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    # 'foo.id' -> 'foo/.id' (db id) and 'foo:id' -> 'foo/id' (external id),
    # then break the path on '/'.
    path = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    path = re.sub(r'([^/]):id', r'\1/id', path)
    return path.split('/')
class except_orm(Exception):
    """Generic exception raised by the ORM layer, built from a
    (name, value) pair — name is a short title (e.g. 'ValueError') and
    value the detailed message."""
    def __init__(self, name, value):
        # Keep the full pair in BaseException.args so generic handlers,
        # str() and logging see both parts.
        self.args = (name, value)
class BrowseRecordError(Exception):
    """Raised when a browse_record is built with invalid data, e.g. a
    non-integer or missing record id (see browse_record.__init__)."""
class browse_null(object):
    """ Readonly python database object browser

    Null placeholder used in place of a browse_record for empty
    relational values; attribute access yields None so chained field
    navigation degrades gracefully.
    """

    def __getitem__(self, name):
        """Item access on the null record."""

    def __getattr__(self, name):
        # Any attribute resolves to None on the null record.
        return None # XXX: return self ?

    def __nonzero__(self):
        """Truth-value hook for the null record."""

    def __unicode__(self):
        """Unicode representation of the null record."""
# TODO: execute an object method on browse_record_list

class browse_record_list(list):
    """ Collection of browse objects

    Such an instance will be returned when doing a ``browse([ids..])``
    and will be iterable, yielding browse() objects
    """

    def __init__(self, lst, context=None):
        # The context is kept on the list itself so callers can propagate
        # it alongside the records.
        super(browse_record_list, self).__init__(lst)
        self.context = context
class browse_record(object):
    """ An object that behaves like a row of an object's table.
        It has attributes after the columns of the corresponding object.

        Example::

            uobj = pool.get('res.users')
            user_rec = uobj.browse(cr, uid, 104)
    """
    # Legacy netsvc logging channel used for user-visible warnings/errors.
    logger = netsvc.Logger()

    def __init__(self, cr, uid, id, table, cache, context=None, list_class=None, fields_process=None):
        """
        @param cache a dictionary of model->field->data to be shared accross browse
            objects, thus reducing the SQL read()s . It can speed up things a lot,
            but also be disastrous if not discarded after write()/unlink() operations
        @param table the object (inherited from orm)
        @param context dictionary with an optional context
        """
        if fields_process is None:
        self._list_class = list_class or browse_record_list
        self._table = table # deprecated, use _model!
        self._table_name = self._table._name
        self.__logger = logging.getLogger(
            'osv.browse_record.' + self._table_name)
        self._context = context
        self._fields_process = fields_process

        # Shared prefetch cache: one sub-dict of field values per model name.
        cache.setdefault(table._name, {})
        self._data = cache[table._name]

        if not (id and isinstance(id, (int, long,))):
            raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
#        if not table.exists(cr, uid, id, context):
#            raise BrowseRecordError(_('Object %s does not exists') % (self,))

        if id not in self._data:
            self._data[id] = {'id': id}

    def __getitem__(self, name):
        """Return the value of field ``name`` for this record, reading it
        from the database on cache miss and prefetching the same field(s)
        for the other cached records of the model."""
        if name not in self._data[self._id]:
            # build the list of fields we will fetch

            # fetch the definition of the field which was asked for
            if name in self._table._columns:
                col = self._table._columns[name]
            elif name in self._table._inherit_fields:
                col = self._table._inherit_fields[name][2]
            elif hasattr(self._table, str(name)):
                # Not a field: expose model methods as callables bound to
                # this record's cursor, uid and id.
                attr = getattr(self._table, name)
                if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
                    def function_proxy(*args, **kwargs):
                        if 'context' not in kwargs and self._context:
                            kwargs.update(context=self._context)
                        return attr(self._cr, self._uid, [self._id], *args, **kwargs)
                    return function_proxy
                error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
                self.logger.notifyChannel("browse_record", netsvc.LOG_WARNING, error_msg)
                raise KeyError(error_msg)

            # if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
                # gen the list of "local" (ie not inherited) fields which are classic or many2one
                fields_to_fetch = filter(lambda x: x[1]._classic_write, self._table._columns.items())
                # gen the list of inherited fields
                inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
                # complete the field list with the inherited fields which are classic or many2one
                fields_to_fetch += filter(lambda x: x[1]._classic_write, inherits)
            # otherwise we fetch only that field
                fields_to_fetch = [(name, col)]

            # Only read the ids that still miss this field in the shared cache.
            ids = filter(lambda id: name not in self._data[id], self._data.keys())
            field_names = map(lambda x: x[0], fields_to_fetch)
            field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")

            # TODO: improve this, very slow for reports
            if self._fields_process:
                lang = self._context.get('lang', 'en_US') or 'en_US'
                lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
                    raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
                lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])

                for field_name, field_column in fields_to_fetch:
                    if field_column._type in self._fields_process:
                        for result_line in field_values:
                            result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
                            if result_line[field_name]:
                                result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)

                # Where did those ids come from? Perhaps old entries in ir_model_dat?
                self.__logger.warn("No field_values found for ids %s in %s", ids, self)
                raise KeyError('Field %s not found in %s'%(name, self))
            # create browse records for 'remote' objects
            for result_line in field_values:
                for field_name, field_column in fields_to_fetch:
                    if field_column._type in ('many2one', 'one2one'):
                        if result_line[field_name]:
                            obj = self._table.pool.get(field_column._obj)
                            # read() may return (id, name) pairs or bare ids.
                            if isinstance(result_line[field_name], (list, tuple)):
                                value = result_line[field_name][0]
                                value = result_line[field_name]
                                # FIXME: this happen when a _inherits object
                                # overwrite a field of it parent. Need
                                # testing to be sure we got the right
                                # object and not the parent one.
                                if not isinstance(value, browse_record):
                                        # In some cases the target model is not available yet, so we must ignore it,
                                        # which is safe in most cases, this value will just be loaded later when needed.
                                        # This situation can be caused by custom fields that connect objects with m2o without
                                        # respecting module dependencies, causing relationships to be connected to soon when
                                        # the target is not loaded yet.
                                    new_data[field_name] = browse_record(self._cr,
                                        self._uid, value, obj, self._cache,
                                        context=self._context,
                                        list_class=self._list_class,
                                        fields_process=self._fields_process)
                                    new_data[field_name] = value
                                new_data[field_name] = browse_null()
                            new_data[field_name] = browse_null()
                    elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
                        new_data[field_name] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in result_line[field_name]], self._context)
                    elif field_column._type in ('reference'):
                        if result_line[field_name]:
                            if isinstance(result_line[field_name], browse_record):
                                new_data[field_name] = result_line[field_name]
                                # reference values are 'model,id' strings
                                ref_obj, ref_id = result_line[field_name].split(',')
                                ref_id = long(ref_id)
                                obj = self._table.pool.get(ref_obj)
                                new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
                                new_data[field_name] = browse_null()
                            new_data[field_name] = browse_null()
                        new_data[field_name] = result_line[field_name]
                self._data[result_line['id']].update(new_data)
            if not name in self._data[self._id]:
                # How did this happen? Could be a missing model due to custom fields used too soon, see above.
                self.logger.notifyChannel("browse_record", netsvc.LOG_ERROR,
                        "Fields to fetch: %s, Field values: %s"%(field_names, field_values))
                self.logger.notifyChannel("browse_record", netsvc.LOG_ERROR,
                        "Cached: %s, Table: %s"%(self._data[self._id], self._table))
                raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
        return self._data[self._id][name]

    def __getattr__(self, name):
        # Attribute access delegates to __getitem__; unknown fields surface
        # as AttributeError instead of KeyError.
            raise AttributeError(e)

    def __contains__(self, name):
        """True if ``name`` is a (possibly inherited) field or a model attribute."""
        return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)

        raise NotImplementedError("Iteration is not allowed on %s" % self)

    def __hasattr__(self, name):

        return "browse_record(%s, %d)" % (self._table_name, self._id)

    def __eq__(self, other):
        """Two records are equal when model name and id both match."""
        if not isinstance(other, browse_record):
        return (self._table_name, self._id) == (other._table_name, other._id)

    def __ne__(self, other):
        if not isinstance(other, browse_record):
        return (self._table_name, self._id) != (other._table_name, other._id)

    # we need to define __unicode__ even though we've already defined __str__
    # because we have overridden __getattr__
    def __unicode__(self):
        return unicode(str(self))

        return hash((self._table_name, self._id))

        """Force refreshing this browse_record's data and all the data of the
        records that belong to the same cache, by emptying the cache completely,
        preserving only the record identifiers (for prefetching optimizations).
        """
        for model, model_cache in self._cache.iteritems():
            # only preserve the ids of the records that were in the cache
            cached_ids = dict([(i, {'id': i}) for i in model_cache.keys()])
            self._cache[model].clear()
            self._cache[model].update(cached_ids)
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      unbounded VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    if not isinstance(size, int):
        raise TypeError("VARCHAR parameter should be an int, got %s"
    return 'VARCHAR(%d)' % size
550 FIELDS_TO_PGTYPES = {
551 fields.boolean: 'bool',
552 fields.integer: 'int4',
553 fields.integer_big: 'int8',
557 fields.datetime: 'timestamp',
558 fields.binary: 'bytea',
559 fields.many2one: 'int4',
def get_pg_type(f, type_override=None):
    """Map an ORM field object to its PostgreSQL column type.

    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        # Direct mapping for simple scalar field classes.
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        pg_type = ('numeric', 'NUMERIC')
        pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        # Integer-keyed selections (or size == -1) are stored as INTEGER,
        # the rest as VARCHAR.
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        # Stored function fields dispatch on the type they emulate.
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
            pg_type = get_pg_type(f, getattr(fields, f._type))
        logging.getLogger('orm').warn('%s type not supported!', field_type)
class MetaModel(type):
    """ Metaclass for the Model.

    This class is used as the metaclass for the Model class to discover
    the models defined in a module (i.e. without instanciating them).
    If the automatic discovery is not needed, it is possible to set the
    model's _register attribute to False.
    """

    # Registry of model classes declared per module name, filled as the
    # classes are created.
    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            # Registration disabled for this class: re-enable it for its
            # subclasses and fall back to plain type initialization.
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)

        # Derive the module name from the class's python module path.
        module_name = self.__module__.split('.')[0]
        if not hasattr(self, '_module'):
            self._module = module_name

        # Remember which models to instanciate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)
624 # Definition of log access columns, automatically added to models if
625 # self._log_access is True
626 LOG_ACCESS_COLUMNS = {
627 'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
628 'create_date': 'TIMESTAMP',
629 'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
630 'write_date': 'TIMESTAMP'
632 # special columns automatically created by the ORM
633 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS.keys()
635 class BaseModel(object):
636 """ Base class for OpenERP models.
638 OpenERP models are created by inheriting from this class' subclasses:
640 * Model: for regular database-persisted models
641 * TransientModel: for temporary data, stored in the database but automatically
642 vaccuumed every so often
643 * AbstractModel: for abstract super classes meant to be shared by multiple
644 _inheriting classes (usually Models or TransientModels)
646 The system will later instantiate the class once per database (on
647 which the class' module is installed).
649 To create a class that should not be instantiated, the _register class attribute
652 __metaclass__ = MetaModel
653 _register = False # Set to false if the model shouldn't be automatically discovered.
659 _parent_name = 'parent_id'
660 _parent_store = False
661 _parent_order = False
668 _transient = False # True in a TransientModel
669 _transient_max_count = None
670 _transient_max_hours = None
671 _transient_check_time = 20
674 # { 'parent_model': 'm2o_field', ... }
677 # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
678 # model from which it is inherits'd, r is the (local) field towards m, f
679 # is the _column object itself, and n is the original (i.e. top-most)
682 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
683 # field_column_obj, origina_parent_model), ... }
686 # Mapping field name/column_info object
687 # This is similar to _inherit_fields but:
688 # 1. includes self fields,
689 # 2. uses column_info instead of a triple.
695 _sql_constraints = []
696 _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
697 __logger = logging.getLogger('orm')
698 __schema = logging.getLogger('orm.schema')
700 CONCURRENCY_CHECK_FIELD = '__last_update'
    def log(self, cr, uid, id, message, secondary=False, context=None):
        """Record ``message`` for record ``id`` in the res.log model,
        unless logging was disabled via context['disable_log'].

        :param cr: database cursor
        :param uid: current user id
        :param int id: id of the record the message relates to
        :param str message: text to log
        :param bool secondary: stored on the res.log entry
        :param dict context: optional context
        """
        if context and context.get('disable_log'):
        return self.pool.get('res.log').create(cr, uid,
                'res_model': self._name,
                'secondary': secondary,
    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened.

        :param cr: database cursor
        :param uid: current user id
        :param list fields_list: names of the fields involved in the view
        :param dict context: optional context
        """
    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).
        """
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
            # No ir_model row yet: allocate an id from the sequence and
            # insert one for this model.
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
            model_id = cr.fetchone()[0]
        if 'module' in context:
            # Loading from a module: also make sure an ir_model_data entry
            # links the model to that module.
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)

        # Existing ir_model_fields rows for this model, indexed by name.
        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        for (k, f) in self._columns.items():
                'model_id': model_id,
                'field_description': f.string.replace("'", " "),
                'relation': f._obj or '',
                'view_load': (f.view_load and 1) or 0,
                'select_level': tools.ustr(f.select or 0),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': (f._type=='one2many' and isinstance(f, fields.one2many)) and f._fields_id or '',
            # When its a custom field,it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                #setting value to let the problem NOT occur next time
                    vals['select_level'] = cols[k]['select_level']

                # Field not known yet: insert a fresh ir_model_fields row
                # (and optionally an ir_model_data entry).
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation,view_load,state,select_level,relation_field, translate
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], bool(vals['view_load']), 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate'])
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                        # Name collision in ir_model_data: disambiguate with the new id.
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                # Field already known: refresh the stored description when
                # any of the computed values changed.
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s
                            model=%s AND name=%s""", (
                            vals['model_id'], vals['field_description'], vals['ttype'],
                            vals['relation'], bool(vals['view_load']),
                            vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['model'], vals['name']
822 # Goal: try to apply inheritance at the instanciation level and
823 # put objects in the pool var
    def create_instance(cls, pool, cr):
        """ Instanciate a given model.

        This class method instanciates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        The ``attributes`` argument specifies which parent class attributes
        are combined with the child's.

        :param pool: the model registry the instance is created in
        :param cr: database cursor

        TODO: the creation of the combined class is repeated at each call of
        this method. This is probably unnecessary.
        """
        # Class attributes merged from parent classes into the combined class.
        attributes = ['_columns', '_defaults', '_inherits', '_constraints',
        parent_names = getattr(cls, '_inherit', None)
            if isinstance(parent_names, (str, unicode)):
                # Single-parent _inherit: the child may reuse the parent's name.
                name = cls._name or parent_names
                parent_names = [parent_names]
                    raise TypeError('_name is mandatory in case of multiple inheritance')

            for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
                parent_model = pool.get(parent_name)
                if not getattr(cls, '_original_module', None) and name == parent_model._name:
                    cls._original_module = parent_model._original_module
                    raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                        'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
                parent_class = parent_model.__class__
                    # Start from a shallow copy of the parent's attribute value.
                    new = copy.copy(getattr(parent_model, s, {}))
                        # Don't _inherit custom fields.
                    if hasattr(new, 'update'):
                        # dict-like attributes: child entries override parent's.
                        new.update(cls.__dict__.get(s, {}))
                    elif s=='_constraints':
                        for c in cls.__dict__.get(s, []):
                            for c2 in range(len(new)):
                                #For _constraints, we should check field and methods as well
                                if new[c2][2]==c[2] and (new[c2][0] == c[0] \
                                        or getattr(new[c2][0],'__name__', True) == \
                                        getattr(c[0],'__name__', False)):
                                    # If new class defines a constraint with
                                    # same function name, we let it override
                        # list-like attributes: child entries are appended.
                        new.extend(cls.__dict__.get(s, []))
            # Build the combined class on top of (cls, parent_class) without
            # re-registering it through the metaclass.
            cls = type(name, (cls, parent_class), dict(nattr, _register=False))

        if not getattr(cls, '_original_module', None):
            cls._original_module = cls._module
        obj = object.__new__(cls)
        obj.__init__(pool, cr)
900 """Register this model.
902 This doesn't create an instance but simply register the model
903 as being part of the module where it is defined.
908 # Set the module name (e.g. base, sale, accounting, ...) on the class.
909 module = cls.__module__.split('.')[0]
910 if not hasattr(cls, '_module'):
913 # Record this class in the list of models to instantiate for this module,
914 # managed by the metaclass.
915 module_model_list = MetaModel.module_to_models.setdefault(cls._module, [])
916 if cls not in module_model_list:
917 module_model_list.append(cls)
919 # Since we don't return an instance here, the __init__
920 # method won't be called.
923 def __init__(self, pool, cr):
924 """ Initialize a model and make it part of the given registry.
926 - copy the stored fields' functions in the osv_pool,
927 - update the _columns with the fields found in ir_model_fields,
928 - ensure there is a many2one for each _inherits'd parent,
929 - update the children's _columns,
930 - give a chance to each field to initialize itself.
933 pool.add(self._name, self)
936 if not self._name and not hasattr(self, '_inherit'):
937 name = type(self).__name__.split('.')[0]
938 msg = "The class %s has to have a _name attribute" % name
940 logger = netsvc.Logger()
941 logger.notifyChannel('orm', netsvc.LOG_ERROR, msg)
942 raise except_orm('ValueError', msg)
944 if not self._description:
945 self._description = self._name
947 self._table = self._name.replace('.', '_')
949 if not hasattr(self, '_log_access'):
950 # If _log_access is not specified, it is the same value as _auto.
951 self._log_access = getattr(self, "_auto", True)
953 self._columns = self._columns.copy()
954 for store_field in self._columns:
955 f = self._columns[store_field]
956 if hasattr(f, 'digits_change'):
958 def not_this_field(stored_func):
959 x, y, z, e, f, l = stored_func
960 return x != self._name or y != store_field
961 self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
962 if not isinstance(f, fields.function):
968 sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, 10, None)}
969 for object, aa in sm.items():
971 (fnct, fields2, order, length) = aa
973 (fnct, fields2, order) = aa
976 raise except_orm('Error',
977 ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
978 self.pool._store_function.setdefault(object, [])
979 self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
980 self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
982 for (key, _, msg) in self._sql_constraints:
983 self.pool._sql_error[self._table+'_'+key] = msg
987 cr.execute("SELECT id FROM ir_model_fields WHERE name=%s AND model=%s", ('state', 'ir.model.fields'))
989 cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
990 for field in cr.dictfetchall():
991 if field['name'] in self._columns:
994 'string': field['field_description'],
995 'required': bool(field['required']),
996 'readonly': bool(field['readonly']),
997 'domain': eval(field['domain']) if field['domain'] else None,
998 'size': field['size'],
999 'ondelete': field['on_delete'],
1000 'translate': (field['translate']),
1002 #'select': int(field['select_level'])
1005 if field['ttype'] == 'selection':
1006 self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
1007 elif field['ttype'] == 'reference':
1008 self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
1009 elif field['ttype'] == 'many2one':
1010 self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
1011 elif field['ttype'] == 'one2many':
1012 self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
1013 elif field['ttype'] == 'many2many':
1014 _rel1 = field['relation'].replace('.', '_')
1015 _rel2 = field['model'].replace('.', '_')
1016 _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
1017 self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
1019 self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
1020 self._inherits_check()
1021 self._inherits_reload()
1022 if not self._sequence:
1023 self._sequence = self._table + '_id_seq'
1024 for k in self._defaults:
1025 assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
1026 for f in self._columns:
1027 self._columns[f].restart()
1030 if self.is_transient():
1031 self._transient_check_count = 0
1032 self._transient_max_count = config.get('osv_memory_count_limit')
1033 self._transient_max_hours = config.get('osv_memory_age_limit')
1034 assert self._log_access, "TransientModels must have log_access turned on, "\
1035 "in order to implement their access rights policy"
def __export_row(self, cr, uid, row, fields, context=None):
    """Export a single browse_record as rows of string values.

    Each requested field path in ``fields`` (a list of path-component
    lists, e.g. ``['order_line', 'price_unit']``) yields one cell;
    one2many sub-records produce additional rows appended after the
    main one.

    NOTE(review): several lines are missing from this excerpt; the
    comments below describe only the visible code.

    :param row: browse_record to export
    :param fields: list of field paths (each a list of components)
    :return: ``[data] + lines`` — the flat row for ``row`` followed by
             the extra rows generated for its one2many children
    """

    # Placeholder value for an empty cell, chosen by column type.
    # (the return statements of each branch are missing from this excerpt)
    def check_type(field_type):
        if field_type == 'float':
        elif field_type == 'integer':
        elif field_type == 'boolean':

    # Resolve the column object for the current path component f[i],
    # following _inherits parents recursively.
    # NOTE(review): reads f/i from the enclosing loop's scope, and the
    # recursive branch discards its result — visible as-is in the original.
    def selection_field(in_field):
        col_obj = self.pool.get(in_field.keys()[0])
        if f[i] in col_obj._columns.keys():
            return col_obj._columns[f[i]]
        elif f[i] in col_obj._inherits.keys():
            selection_field(col_obj._inherits)

    # One output cell per requested field, initialised to ''.
    data = map(lambda x: '', range(len(fields)))
    for fpos in range(len(fields)):
                # 'id' component: export the record's external (XML) id,
                # reusing an existing ir.model.data entry when present...
                model_data = self.pool.get('ir.model.data')
                data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
                d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
                r = '%s.%s' % (d['module'], d['name'])
                # ...or minting a unique '__export__' xml id otherwise.
                # ``postfix`` disambiguates name collisions — TODO confirm
                # its initialisation (line missing from excerpt).
                n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
                if not model_data.search(cr, uid, [('name', '=', n)]):
                model_data.create(cr, uid, {
                    'model': self._name,
                    'module': '__export__',
                # To display external name of selection field when its exported
                if f[i] in self._columns.keys():
                    cols = self._columns[f[i]]
                elif f[i] in self._inherit_fields.keys():
                    cols = selection_field(self._inherits)
                # Replace a selection key with its human-readable label.
                if cols and cols._type == 'selection':
                    sel_list = cols.selection
                    if r and type(sel_list) == type([]):
                        r = [x[1] for x in sel_list if r==x[0]]
                        r = r and r[0] or False
                # Empty value: substitute the type-specific placeholder.
                if f[i] in self._columns:
                    r = check_type(self._columns[f[i]]._type)
                elif f[i] in self._inherit_fields:
                    r = check_type(self._inherit_fields[f[i]][2]._type)
                data[fpos] = r or False
            # one2many value: recurse to export sub-records; the first
            # sub-row is merged into this record's row.
            if isinstance(r, (browse_record_list, list)):
                fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
                if [x for x in fields2 if x]:
                    done.append(fields2)
                    lines2 = self.__export_row(cr, uid, row2, fields2,
                    for fpos2 in range(len(fields)):
                        if lines2 and lines2[0][fpos2]:
                            data[fpos2] = lines2[0][fpos2]
                        # many2many-ish value: join the display names of all
                        # related records with commas.
                        name_relation = self.pool.get(rr._table_name)._rec_name
                        if isinstance(rr[name_relation], browse_record):
                            rr = rr[name_relation]
                        rr_name = self.pool.get(rr._table_name).name_get(cr, uid, [rr.id], context=context)
                        rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
                        dt += tools.ustr(rr_name or '') + ','
                    data[fpos] = dt[:-1]  # drop the trailing comma
            # many2one value: export the record's display name.
            if isinstance(r, browse_record):
                r = self.pool.get(r._table_name).name_get(cr, uid, [r.id], context=context)
                r = r and r[0] and r[0][1] or ''
            data[fpos] = tools.ustr(r or '')
    return [data] + lines
def export_data(self, cr, uid, ids, fields_to_export, context=None):
    """
    Export fields for selected objects

    :param cr: database cursor
    :param uid: current user id
    :param ids: list of ids
    :param fields_to_export: list of fields
    :param context: context arguments, like lang, time zone
    :rtype: dictionary with a *datas* matrix

    This method is used when exporting data via client menu
    """
    # Merge own columns with those reachable through _inherits, so that
    # inherited fields can be exported too.
    cols = self._columns.copy()
    for f in self._inherit_fields:
        cols.update({f: self._inherit_fields[f][2]})
    # Normalise 'id'/'.id' path notation into component lists.
    fields_to_export = map(fix_import_export_id_paths, fields_to_export)
    # NOTE(review): the ``datas = []`` initialisation line is missing
    # from this excerpt — confirm against the full source.
    for row in self.browse(cr, uid, ids, context):
        datas += self.__export_row(cr, uid, row, fields_to_export, context)
    return {'datas': datas}
def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
    """
    Import given data in given module

    This method is used when importing data via client menu.

    Example of fields to import for a sale.order::

        partner_id, (=name_search)
        order_line/.id, (=database_id)
        order_line/product_id/id, (=xml id)
        order_line/price_unit,
        order_line/product_uom_qty,
        order_line/product_uom/id (=xml_id)

    This method returns a 4-tuple with the following structure:

    * The first item is a return code, it returns either ``-1`` in case o

    :param cr: database cursor
    :param uid: current user id
    :param fields: list of fields
    :param data: data to import
    :param mode: 'init' or 'update' for record creation
    :param current_module: module name
    :param noupdate: flag for record creation
    :param context: context arguments, like lang, time zone,
    :param filename: optional file to store partial import state for recovery
    :returns: 4-tuple of a return code, an errored resource, an error message and ???
    :rtype: (int, dict|0, str|0, ''|0)

    NOTE(review): many lines are missing from this excerpt; comments
    describe only the visible code.
    """
    # Normalise 'id'/'.id' path notation into component lists.
    fields = map(fix_import_export_id_paths, fields)
    logger = netsvc.Logger()
    ir_model_data_obj = self.pool.get('ir.model.data')

    # mode: id (XML id) or .id (database id) or False for name_get
    def _get_id(model_name, id, current_module=False, mode='id'):
            # '.id' mode: the value is a raw database id — verify it exists.
            obj_model = self.pool.get(model_name)
            ids = obj_model.search(cr, uid, [('id', '=', int(id))])
                raise Exception(_("Database ID doesn't exist: %s : %s") %(model_name, id))
            # 'id' mode: resolve 'module.xml_id' (or a bare xml_id in the
            # current module) through ir.model.data.
                module, xml_id = id.rsplit('.', 1)
                module, xml_id = current_module, id
            record_id = ir_model_data_obj._get_id(cr, uid, module, xml_id)
            ir_model_data = ir_model_data_obj.read(cr, uid, [record_id], ['res_id'])
            if not ir_model_data:
                raise ValueError('No references to %s.%s' % (module, xml_id))
            id = ir_model_data[0]['res_id']
            # default mode: resolve by display name via name_search.
            obj_model = self.pool.get(model_name)
            ids = obj_model.name_search(cr, uid, id, operator='=', context=context)
                raise ValueError('No record found for %s' % (id,))

    # datas: a list of records, each record is defined by a list of values
    # prefix: a list of prefix fields ['line_ids']
    # position: the line to process, skip is False if it's the first line of the current record
    # Returns:
    # (res, position, warning, res_id) with
    # res: the record for the next line to process (including it's one2many)
    # position: the new position for the next line
    # res_id: the ID of the record if it's a modification
    def process_liness(self, datas, prefix, current_module, model_name, fields_def, position=0, skip=0):
        line = datas[position]
        for i in range(len(fields)):
                # Row shorter than the header: unrecoverable CSV error.
                raise Exception(_('Please check that all your lines have %d columns.'
                    'Stopped around line %d having %d columns.') % \
                    (len(fields), position+2, len(line)))
            # Skip columns that do not belong to the current sub-record.
            # (``<>`` is the Python 2 spelling of ``!=``.)
            if field[:len(prefix)] <> prefix:
                if line[i] and skip:

            #set the mode for m2o, o2m, m2m : xml_id/id/name
            if len(field) == len(prefix)+1:
                mode = field[len(prefix)+1]

            # TODO: improve this by using csv.csv_reader
            # Split a multi-id cell and resolve each id with _get_id.
            def many_ids(line, relation, current_module, mode):
                for db_id in line.split(config.get('csv_internal_sep')):
                    res.append(_get_id(relation, db_id, current_module, mode))

            # ID of the record using a XML ID
            if field[len(prefix)]=='id':
                    data_res_id = _get_id(model_name, line[i], current_module, 'id')

            # ID of the record using a database ID
            elif field[len(prefix)]=='.id':
                data_res_id = _get_id(model_name, line[i], current_module, '.id')

            # recursive call for getting children and returning [(0,0,{})] or [(1,ID,{})]
            if fields_def[field[len(prefix)]]['type']=='one2many':
                if field[len(prefix)] in done:
                done[field[len(prefix)]] = True
                relation = fields_def[field[len(prefix)]]['relation']
                relation_obj = self.pool.get(relation)
                newfd = relation_obj.fields_get( cr, uid, context=context )
                    res = many_ids(line[i], relation, current_module, mode)
                # Consume subsequent CSV lines belonging to this o2m child.
                while pos < len(datas):
                    res2 = process_liness(self, datas, prefix + [field[len(prefix)]], current_module, relation_obj._name, newfd, pos, first)
                    (newrow, pos, w2, data_res_id2, xml_id2) = res2
                    nbrmax = max(nbrmax, pos)
                        # Existing child with no changed values: link it (4).
                        res.append((4, data_res_id2))
                    # Entirely empty child row: nothing to write.
                    if (not newrow) or not reduce(lambda x, y: x or y, newrow.values(), 0):
                    # (1, id, vals) updates an existing child, (0, 0, vals) creates one.
                    res.append( (data_res_id2 and 1 or 0, data_res_id2 or 0, newrow) )

            elif fields_def[field[len(prefix)]]['type']=='many2one':
                relation = fields_def[field[len(prefix)]]['relation']
                res = _get_id(relation, line[i], current_module, mode)

            elif fields_def[field[len(prefix)]]['type']=='many2many':
                relation = fields_def[field[len(prefix)]]['relation']
                res = many_ids(line[i], relation, current_module, mode)

            elif fields_def[field[len(prefix)]]['type'] == 'integer':
                res = line[i] and int(line[i]) or 0
            elif fields_def[field[len(prefix)]]['type'] == 'boolean':
                res = line[i].lower() not in ('0', 'false', 'off')
            elif fields_def[field[len(prefix)]]['type'] == 'float':
                res = line[i] and float(line[i]) or 0.0
            elif fields_def[field[len(prefix)]]['type'] == 'selection':
                # Accept either the internal key or the visible label.
                for key, val in fields_def[field[len(prefix)]]['selection']:
                    if tools.ustr(line[i]) in [tools.ustr(key), tools.ustr(val)]:
                if line[i] and not res:
                    logger.notifyChannel("import", netsvc.LOG_WARNING,
                            _("key '%s' not found in selection field '%s'") % \
                                    (tools.ustr(line[i]), tools.ustr(field[len(prefix)])))
                    warning += [_("Key/value '%s' not found in selection field '%s'") % (tools.ustr(line[i]), tools.ustr(field[len(prefix)]))]

            row[field[len(prefix)]] = res or False

        result = (row, nbrmax, warning, data_res_id, xml_id)

    fields_def = self.fields_get(cr, uid, context=context)

    # Resume a previously interrupted import if partial-import state exists.
    if config.get('import_partial', False) and filename:
        data = pickle.load(file(config.get('import_partial')))

    while position<len(datas):
            (res, position, warning, res_id, xml_id) = \
                    process_liness(self, datas, [], current_module, self._name, fields_def, position=position)
                return (-1, res, 'Line ' + str(position) +' : ' + '!\n'.join(warning), '')
            # Create or update the record through ir.model.data so the
            # xml id bookkeeping stays consistent.
            ir_model_data_obj._update(cr, uid, self._name,
                 current_module, res, mode=mode, xml_id=xml_id,
                 noupdate=noupdate, res_id=res_id, context=context)
        except Exception, e:
            return (-1, res, 'Line ' + str(position) + ' : ' + tools.ustr(e), '')

        # Checkpoint progress every 100 rows for crash recovery.
        if config.get('import_partial', False) and filename and (not (position%100)):
            data = pickle.load(file(config.get('import_partial')))
            data[filename] = position
            pickle.dump(data, file(config.get('import_partial'), 'wb'))
            if context.get('defer_parent_store_computation'):
                self._parent_store_compute(cr)

    if context.get('defer_parent_store_computation'):
        self._parent_store_compute(cr)
    return (position, 0, 0, 0)
def get_invalid_fields(self, cr, uid):
    """Return, as a fresh list, the names of the fields that failed the
    most recent ``_validate`` pass (snapshot of ``self._invalids``)."""
    return [field_name for field_name in self._invalids]
def _validate(self, cr, uid, ids, context=None):
    """Run all ``_constraints`` on the given records; collect translated
    error messages and raise a single ValidateError if any constraint
    fails. NOTE(review): a few lines (the error-message accumulator and
    some else branches) are missing from this excerpt.
    """
    context = context or {}
    lng = context.get('lang', False) or 'en_US'
    trans = self.pool.get('ir.translation')
    for constraint in self._constraints:
        fun, msg, fields = constraint
        if not fun(self, cr, uid, ids):
            # Check presence of __call__ directly instead of using
            # callable() because it will be deprecated as of Python 3.0
            if hasattr(msg, '__call__'):
                # Dynamic message: the constraint supplies it (optionally
                # as a (format, params) tuple).
                tmp_msg = msg(self, cr, uid, ids, context=context)
                if isinstance(tmp_msg, tuple):
                    tmp_msg, params = tmp_msg
                    translated_msg = tmp_msg % params
                    translated_msg = tmp_msg
                # Static message: translate it via ir.translation.
                translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg) or msg
                _("Error occurred while validating the field(s) %s: %s") % (','.join(fields), translated_msg)
            # Remember which fields are invalid for get_invalid_fields().
            self._invalids.update(fields)
        raise except_orm('ValidateError', '\n'.join(error_msgs))
        # All constraints passed: reset the invalid-fields set.
        self._invalids.clear()
def default_get(self, cr, uid, fields_list, context=None):
    """
    Returns default values for the fields in fields_list.

    Defaults are gathered in increasing priority: _inherits parents,
    the model's own ``_defaults``, property fields (ir.property),
    user-defined defaults (ir.values), and finally ``default_XXX``
    context keys. NOTE(review): a few lines (initialisations, some
    ``continue``/``else`` branches and the final return) are missing
    from this excerpt.

    :param fields_list: list of fields to get the default values for (example ['field1', 'field2',])
    :type fields_list: list
    :param context: optional context dictionary - it may contains keys for specifying certain options
                    like ``context_lang`` (language) or ``context_tz`` (timezone) to alter the results of the call.
                    It may contain keys in the form ``default_XXX`` (where XXX is a field name), to set
                    or override a default value for a field.
                    A special ``bin_size`` boolean flag may also be passed in the context to request the
                    value of all fields.binary columns to be returned as the size of the binary instead of its
                    contents. This can also be selectively overriden by passing a field-specific flag
                    in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
                    Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
    :return: dictionary of the default values (set on the object model class, through user preferences, or in the context)
    """
    # trigger view init hook
    self.view_init(cr, uid, fields_list, context)

    # get the default values for the inherited fields
    for t in self._inherits.keys():
        defaults.update(self.pool.get(t).default_get(cr, uid, fields_list,
            context))

    # get the default values defined in the object
    for f in fields_list:
        if f in self._defaults:
            if callable(self._defaults[f]):
                defaults[f] = self._defaults[f](self, cr, uid, context)
                defaults[f] = self._defaults[f]

        # Resolve the column definition for f (own column, inherited
        # column, or False when unknown).
        fld_def = ((f in self._columns) and self._columns[f]) \
                or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \

        if isinstance(fld_def, fields.property):
            # Property fields take their default from ir.property.
            property_obj = self.pool.get('ir.property')
            prop_value = property_obj.get(cr, uid, f, self._name, context=context)
                if isinstance(prop_value, (browse_record, browse_null)):
                    defaults[f] = prop_value.id
                    defaults[f] = prop_value

        if f not in defaults:

    # get the default values set by the user and override the default
    # values defined in the object
    ir_values_obj = self.pool.get('ir.values')
    res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
    for id, field, field_value in res:
        if field in fields_list:
            fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
            if fld_def._type in ('many2one', 'one2one'):
                # Drop dangling references to deleted records.
                obj = self.pool.get(fld_def._obj)
                if not obj.search(cr, uid, [('id', '=', field_value or False)]):
            if fld_def._type in ('many2many'):
                # Keep only the related ids that still exist.
                obj = self.pool.get(fld_def._obj)
                for i in range(len(field_value)):
                    if not obj.search(cr, uid, [('id', '=',
                        field_value2.append(field_value[i])
                field_value = field_value2
            if fld_def._type in ('one2many'):
                # Filter each sub-record's relational values the same way.
                obj = self.pool.get(fld_def._obj)
                for i in range(len(field_value)):
                    field_value2.append({})
                    for field2 in field_value[i]:
                        if field2 in obj._columns.keys() and obj._columns[field2]._type in ('many2one', 'one2one'):
                            obj2 = self.pool.get(obj._columns[field2]._obj)
                            if not obj2.search(cr, uid,
                                    [('id', '=', field_value[i][field2])]):
                        elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type in ('many2one', 'one2one'):
                            obj2 = self.pool.get(obj._inherit_fields[field2][2]._obj)
                            if not obj2.search(cr, uid,
                                    [('id', '=', field_value[i][field2])]):
                        # TODO add test for many2many and one2many
                        field_value2[i][field2] = field_value[i][field2]
                field_value = field_value2
            defaults[field] = field_value

    # get the default values from the context
    for key in context or {}:
        if key.startswith('default_') and (key[8:] in fields_list):
            defaults[key[8:]] = context[key]
def fields_get_keys(self, cr, user, context=None):
    """Return the names of all fields of the model, including those
    inherited through ``_inherits`` parents.
    NOTE(review): the ``return res`` line is missing from this excerpt.
    """
    res = self._columns.keys()
    # TODO I believe this loop can be replace by
    # res.extend(self._inherit_fields.key())
    for parent in self._inherits:
        res.extend(self.pool.get(parent).fields_get_keys(cr, user, context))
# Overload this method if you need a window title which depends on the context
def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
    """Hook returning a context-dependent window title for the view, or
    a false value to keep the default.
    NOTE(review): the body line is missing from this excerpt.
    """
def __view_look_dom(self, cr, user, node, view_id, in_tree_view, model_fields, context=None):
    """ Return the description of the fields in the node.

    In a normal call to this method, node is a complete view architecture
    but it is actually possible to give some sub-node (this is used so
    that the method can call itself recursively).

    Originally, the field descriptions are drawn from the node itself.
    But there is now some code calling fields_get() in order to merge some
    of those information in the architecture.

    NOTE(review): many lines are missing from this excerpt; comments
    describe only the visible code.
    """
    # Force unicode to utf-8 bytes (lxml wants bytes in Python 2).
    if isinstance(s, unicode):
        return s.encode('utf8')

    def check_group(node):
        """ Set invisible to true if the user is not in the specified groups. """
        if node.get('groups'):
            groups = node.get('groups').split(',')
            ir_model_access = self.pool.get('ir.model.access')
            can_see = any(ir_model_access.check_groups(cr, user, group) for group in groups)
                node.set('invisible', '1')
                modifiers['invisible'] = True
                if 'attrs' in node.attrib:
                    del(node.attrib['attrs']) #avoid making field visible later
            del(node.attrib['groups'])

    if node.tag in ('field', 'node', 'arrow'):
        if node.get('object'):
            # Embedded sub-view on another model: collect its <field>
            # children into a synthetic <form> and describe it recursively.
                if f.tag in ('field'):
                    xml += etree.tostring(f, encoding="utf-8")
            new_xml = etree.fromstring(encode(xml))
            ctx = context.copy()
            ctx['base_model_name'] = self._name
            xarch, xfields = self.pool.get(node.get('object')).__view_look_dom_arch(cr, user, new_xml, view_id, ctx)
            attrs = {'views': views}

        if node.get('name'):
            # Locate the column behind this field (own or inherited).
                if node.get('name') in self._columns:
                    column = self._columns[node.get('name')]
                    column = self._inherit_fields[node.get('name')][2]
                relation = self.pool.get(column._obj)
                    # Inline sub-views declared directly under the field.
                    if f.tag in ('form', 'tree', 'graph'):
                        ctx = context.copy()
                        ctx['base_model_name'] = self._name
                        xarch, xfields = relation.__view_look_dom_arch(cr, user, f, view_id, ctx)
                        views[str(f.tag)] = {
                attrs = {'views': views}
                if node.get('widget') and node.get('widget') == 'selection':
                    # Prepare the cached selection list for the client. This needs to be
                    # done even when the field is invisible to the current user, because
                    # other events could need to change its value to any of the selectable ones
                    # (such as on_change events, refreshes, etc.)

                    # If domain and context are strings, we keep them for client-side, otherwise
                    # we evaluate them server-side to consider them when generating the list of
                    # TODO: find a way to remove this hack, by allow dynamic domains
                        if column._domain and not isinstance(column._domain, basestring):
                            dom = column._domain
                        dom += eval(node.get('domain', '[]'), {'uid': user, 'time': time})
                        search_context = dict(context)
                        if column._context and not isinstance(column._context, basestring):
                            search_context.update(column._context)
                        attrs['selection'] = relation._name_search(cr, user, '', dom, context=search_context, limit=None, name_get_uid=1)
                        # Non-required fields get an explicit empty choice.
                        if (node.get('required') and not int(node.get('required'))) or not column.required:
                            attrs['selection'].append((False, ''))
        fields[node.get('name')] = attrs

        field = model_fields.get(node.get('name'))
            transfer_field_to_modifiers(field, modifiers)

    elif node.tag in ('form', 'tree'):
        result = self.view_header_get(cr, user, False, node.tag, context)
            node.set('string', result)
        in_tree_view = node.tag == 'tree'

    elif node.tag == 'calendar':
        for additional_field in ('date_start', 'date_delay', 'date_stop', 'color'):
            if node.get(additional_field):
                fields[node.get(additional_field)] = {}

    # The view architeture overrides the python model.
    # Get the attrs before they are (possibly) deleted by check_group below
    transfer_node_to_modifiers(node, modifiers, context, in_tree_view)

    # TODO remove attrs couterpart in modifiers when invisible is true ?

    # Translate user-visible attributes (string/confirm/sum/help).
    if 'lang' in context:
        if node.get('string') and not result:
            trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('string'))
            if trans == node.get('string') and ('base_model_name' in context):
                # If translation is same as source, perhaps we'd have more luck with the alternative model name
                # (in case we are in a mixed situation, such as an inherited view where parent_view.model != model
                trans = self.pool.get('ir.translation')._get_source(cr, user, context['base_model_name'], 'view', context['lang'], node.get('string'))
                node.set('string', trans)
        if node.get('confirm'):
            trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('confirm'))
                node.set('confirm', trans)
            trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('sum'))
                node.set('sum', trans)
        if node.get('help'):
            trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('help'))
                node.set('help', trans)

        # Recurse into children (filters/separators inside a field too).
        if children or (node.tag == 'field' and f.tag in ('filter','separator')):
            fields.update(self.__view_look_dom(cr, user, f, view_id, in_tree_view, model_fields, context))

    transfer_modifiers_to_node(modifiers, node)
def _disable_workflow_buttons(self, cr, user, node):
    """ Set the buttons in node to readonly if the user can't activate them.

    A workflow button is clickable when either no transition guarding
    its signal declares groups, or the user belongs to at least one of
    the declared groups. NOTE(review): a few lines (admin shortcut,
    parts of the SQL, final return) are missing from this excerpt.
    """
    # admin user can always activate workflow buttons

    # TODO handle the case of more than one workflow for a model or multiple
    # transitions with different groups and same signal
    usersobj = self.pool.get('res.users')
    # Only workflow buttons (type != 'object') are access-checked here.
    buttons = (n for n in node.getiterator('button') if n.get('type') != 'object')
    for button in buttons:
        user_groups = usersobj.read(cr, user, [user], ['groups_id'])[0]['groups_id']
        # Groups required by transitions firing this button's signal.
        cr.execute("""SELECT DISTINCT t.group_id
                     INNER JOIN wkf_activity a ON a.wkf_id = wkf.id
                     INNER JOIN wkf_transition t ON (t.act_to = a.id)
                       AND t.group_id is NOT NULL
                   """, (self._name, button.get('name')))
        group_ids = [x[0] for x in cr.fetchall() if x[0]]
        can_click = not group_ids or bool(set(user_groups).intersection(group_ids))
        button.set('readonly', str(int(not can_click)))
def __view_look_dom_arch(self, cr, user, node, view_id, context=None):
    """ Return an architecture and a description of all the fields.

    The field description combines the result of fields_get() and
    __view_look_dom().

    :param node: the architecture as as an etree
    :return: a tuple (arch, fields) where arch is the given node as a
        string and fields is the description of all the fields.

    NOTE(review): a few lines are missing from this excerpt.
    """
    if node.tag == 'diagram':
        # Diagram views take their fields from the node/arrow sub-models.
        if node.getchildren()[0].tag == 'node':
            node_fields = self.pool.get(node.getchildren()[0].get('object')).fields_get(cr, user, None, context)
            fields.update(node_fields)
        if node.getchildren()[1].tag == 'arrow':
            arrow_fields = self.pool.get(node.getchildren()[1].get('object')).fields_get(cr, user, None, context)
            fields.update(arrow_fields)
        fields = self.fields_get(cr, user, None, context)
    fields_def = self.__view_look_dom(cr, user, node, view_id, False, fields, context=context)
    node = self._disable_workflow_buttons(cr, user, node)
    arch = etree.tostring(node, encoding="utf-8").replace('\t', '')
    # Keep only the fields actually referenced by the architecture.
    for k in fields.keys():
        if k not in fields_def:
    for field in fields_def:
            # sometime, the view may contain the (invisible) field 'id' needed for a domain (when 2 objects have cross references)
            fields['id'] = {'readonly': True, 'type': 'integer', 'string': 'ID'}
        elif field in fields:
            fields[field].update(fields_def[field])
            # Field referenced by the view but unknown to the model:
            # report which view parts mention it, then fail loudly.
            cr.execute('select name, model from ir_ui_view where (id=%s or inherit_id=%s) and arch like %s', (view_id, view_id, '%%%s%%' % field))
            res = cr.fetchall()[:]
            res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None))
            msg = "\n * ".join([r[0] for r in res])
            msg += "\n\nEither you wrongly customized this view, or some modules bringing those views are not compatible with your current data model"
            netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
            raise except_orm('View error', msg)
def _get_default_form_view(self, cr, user, context=None):
    """ Generates a default single-line form view using all fields
    of the current model except the m2m and o2m ones.

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a form view as an lxml document
    :rtype: etree._Element

    NOTE(review): the ``return view`` line is missing from this excerpt.
    """
    view = etree.Element('form', string=self._description)
    # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
    for field, descriptor in self.fields_get(cr, user, context=context).iteritems():
        # Relational list fields are skipped in the default form.
        if descriptor['type'] in ('one2many', 'many2many'):
        etree.SubElement(view, 'field', name=field)
        # Text fields take a full line: break after them.
        if descriptor['type'] == 'text':
            etree.SubElement(view, 'newline')
def _get_default_tree_view(self, cr, user, context=None):
    """ Generates a single-field tree view, using _rec_name if
    it's one of the columns or the first column it finds otherwise

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a tree view as an lxml document
    :rtype: etree._Element

    NOTE(review): the ``return view`` line is missing from this excerpt.
    """
    _rec_name = self._rec_name
    if _rec_name not in self._columns:
        # Fall back to an arbitrary first column.
        _rec_name = self._columns.keys()[0]

    view = etree.Element('tree', string=self._description)
    etree.SubElement(view, 'field', name=_rec_name)
def _get_default_calendar_view(self, cr, user, context=None):
    """ Generates a default calendar view by trying to infer
    calendar fields from a number of pre-set attribute names

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a calendar view
    :rtype: etree._Element

    NOTE(review): several lines are missing from this excerpt.
    """
    def set_first_of(seq, in_, to):
        """Sets the first value of ``seq`` also found in ``in_`` to
        the ``to`` attribute of the view being closed over.

        Returns whether it's found a suitable value (and set it on
        the attribute) or not
        """

    view = etree.Element('calendar', string=self._description)
    etree.SubElement(view, 'field', name=self._rec_name)

    # Infer a start-date field from conventional names when _date_name
    # is not an actual column.
    if (self._date_name not in self._columns):
        for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
            if dt in self._columns:
                self._date_name = dt
            raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
    view.set('date_start', self._date_name)

    # Optional color field from conventional user/partner names.
    set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                 self._columns, 'color')

    # Need either an end date or a duration; otherwise the view is unusable.
    if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                        self._columns, 'date_stop'):
        if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                            self._columns, 'date_delay'):
                _('Invalid Object Architecture!'),
                _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % (self._name)))
def _get_default_search_view(self, cr, uid, context=None):
    """Generate a default search view from the model's form and tree views.

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: an lxml document of the view
    :rtype: etree._Element

    NOTE(review): the final ``return`` line is missing from this excerpt.
    """
    form_view = self.fields_view_get(cr, uid, False, 'form', context=context)
    tree_view = self.fields_view_get(cr, uid, False, 'tree', context=context)

    # TODO it seems _all_columns could be used instead of fields_get (no need for translated fields info)
    fields = self.fields_get(cr, uid, context=context)
    # Start with the fields flagged searchable ('select') on the model...
    fields_to_search = set(
        field for field, descriptor in fields.iteritems()
        if descriptor.get('select'))

    for view in (form_view, tree_view):
        view_root = etree.fromstring(view['arch'])
        # Only care about select=1 in xpath below, because select=2 is covered
        # by the custom advanced search in clients
        fields_to_search.update(view_root.xpath("//field[@select=1]/@name"))

    tree_view_root = view_root # as provided by loop above
    search_view = etree.Element("search", string=tree_view_root.get("string", ""))

    field_group = etree.SubElement(search_view, "group")
    for field_name in fields_to_search:
        etree.SubElement(field_group, "field", name=field_name)
1896 # if view_id, view_type is not required
1898 def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1900 Get the detailed composition of the requested view like fields, model, view architecture
1902 :param cr: database cursor
1903 :param user: current user id
1904 :param view_id: id of the view or None
1905 :param view_type: type of the view to return if view_id is None ('form', tree', ...)
1906 :param context: context arguments, like lang, time zone
1907 :param toolbar: true to include contextual actions
1908 :param submenu: deprecated
1909 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1910 :raise AttributeError:
1911 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1912 * if some tag other than 'position' is found in parent view
1913 :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
# NOTE(review): this listing is truncated (original line numbers skip), so
# several statements of this method are not visible here; the annotations
# below only describe what the visible code demonstrates. Confirm the full
# body against version control before relying on these notes.
# Visible fragment of the local `encode` helper: bytes for Python-2 lxml.
1920 if isinstance(s, unicode):
1921 return s.encode('utf8')
# Local helper: wrap an inheritance error with the offending view's xml_id.
1924 def raise_view_error(error_msg, child_view_id):
1925 view, child_view = self.pool.get('ir.ui.view').browse(cr, user, [view_id, child_view_id], context)
1926 raise AttributeError("View definition error for inherited view '%s' on model '%s': %s"
1927 % (child_view.xml_id, self._name, error_msg))
# Local helper: resolve an inheritance spec node to its target node.
1929 def locate(source, spec):
1930 """ Locate a node in a source (parent) architecture.
1932 Given a complete source (parent) architecture (i.e. the field
1933 `arch` in a view), and a 'spec' node (a node in an inheriting
1934 view that specifies the location in the source view of what
1935 should be changed), return (if it exists) the node in the
1936 source view matching the specification.
1938 :param source: a parent architecture to modify
1939 :param spec: a modifying node in an inheriting view
1940 :return: a node in the source matching the spec
1943 if spec.tag == 'xpath':
1944 nodes = source.xpath(spec.get('expr'))
1945 return nodes[0] if nodes else None
1946 elif spec.tag == 'field':
1947 # Only compare the field name: a field can be only once in a given view
1948 # at a given level (and for multilevel expressions, we should use xpath
1949 # inheritance spec anyway).
1950 for node in source.getiterator('field'):
1951 if node.get('name') == spec.get('name'):
# Fallback: match any node with the same tag whose attributes (except
# 'position') all equal the spec's attributes.
1955 for node in source.getiterator(spec.tag):
1957 for attr in spec.attrib:
1958 if attr != 'position' and (not node.get(attr) or node.get(attr) != spec.get(attr)):
# Local helper: apply one inheriting view's spec nodes onto `source`.
1965 def apply_inheritance_specs(source, specs_arch, inherit_id=None):
1966 """ Apply an inheriting view.
1968 Apply to a source architecture all the spec nodes (i.e. nodes
1969 describing where and what changes to apply to some parent
1970 architecture) given by an inheriting view.
1972 :param source: a parent architecture to modify
1973 :param specs_arch: a modifying architecture in an inheriting view
1974 :param inherit_id: the database id of the inheriting view
1975 :return: a modified source where the specs are applied
1978 specs_tree = etree.fromstring(encode(specs_arch))
1979 # Queue of specification nodes (i.e. nodes describing where and
1980 # changes to apply to some parent architecture).
1981 specs = [specs_tree]
# Skip comments/processing instructions produced by lxml.
1985 if isinstance(spec, SKIPPED_ELEMENT_TYPES):
# A <data> wrapper just contributes its children as further specs.
1987 if spec.tag == 'data':
1988 specs += [ c for c in specs_tree ]
1990 node = locate(source, spec)
1991 if node is not None:
1992 pos = spec.get('position', 'inside')
1993 if pos == 'replace':
1994 if node.getparent() is None:
# Replacing the root node: the spec's first child becomes the new tree.
1995 source = copy.deepcopy(spec[0])
1998 node.addprevious(child)
1999 node.getparent().remove(node)
2000 elif pos == 'attributes':
2001 for child in spec.getiterator('attribute'):
# empty <attribute> text means "remove this attribute"
2002 attribute = (child.get('name'), child.text and child.text.encode('utf8') or None)
2004 node.set(attribute[0], attribute[1])
2006 del(node.attrib[attribute[0]])
2008 sib = node.getnext()
2012 elif pos == 'after':
2017 sib.addprevious(child)
2018 elif pos == 'before':
2019 node.addprevious(child)
2021 raise_view_error("Invalid position value: '%s'" % pos, inherit_id)
# Spec did not match any node: report it with its attributes rendered.
2024 ' %s="%s"' % (attr, spec.get(attr))
2025 for attr in spec.attrib
2026 if attr != 'position'
2028 tag = "<%s%s>" % (spec.tag, attrs)
2029 raise_view_error("Element '%s' not found in parent view '%%(parent_xml_id)s'" % tag, inherit_id)
# Local helper: recursively apply every view inheriting from `inherit_id`.
2032 def apply_view_inheritance(cr, user, source, inherit_id):
2033 """ Apply all the (directly and indirectly) inheriting views.
2035 :param source: a parent architecture to modify (with parent
2036 modifications already applied)
2037 :param inherit_id: the database view_id of the parent view
2038 :return: a modified source where all the modifying architecture
2042 sql_inherit = self.pool.get('ir.ui.view').get_inheriting_views_arch(cr, user, inherit_id, self._name)
2043 for (view_arch, view_id) in sql_inherit:
2044 source = apply_inheritance_specs(source, view_arch, view_id)
2045 source = apply_view_inheritance(cr, user, source, view_id)
2048 result = {'type': view_type, 'model': self._name}
2051 parent_view_model = None
2052 view_ref = context.get(view_type + '_view_ref')
2053 # Search for a root (i.e. without any parent) view.
# A context key like 'form_view_ref' = 'module.xml_id' overrides view_id.
2055 if view_ref and not view_id:
2057 module, view_ref = view_ref.split('.', 1)
2058 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
2059 view_ref_res = cr.fetchone()
2061 view_id = view_ref_res[0]
2064 cr.execute("""SELECT arch,name,field_parent,id,type,inherit_id,model
2066 WHERE id=%s""", (view_id,))
# No explicit view_id: pick the highest-priority root view of this type.
2068 cr.execute("""SELECT arch,name,field_parent,id,type,inherit_id,model
2070 WHERE model=%s AND type=%s AND inherit_id IS NULL
2071 ORDER BY priority""", (self._name, view_type))
2072 sql_res = cr.dictfetchone()
# Climb to the root of the inheritance chain if we landed on a child view.
2077 view_id = sql_res['inherit_id'] or sql_res['id']
2078 parent_view_model = sql_res['model']
2079 if not sql_res['inherit_id']:
2082 # if a view was found
2084 source = etree.fromstring(encode(sql_res['arch']))
2086 arch=apply_view_inheritance(cr, user, source, sql_res['id']),
2087 type=sql_res['type'],
2088 view_id=sql_res['id'],
2089 name=sql_res['name'],
2090 field_parent=sql_res['field_parent'] or False)
2092 # otherwise, build some kind of default view
2094 view = getattr(self, '_get_default_%s_view' % view_type)(
2096 except AttributeError:
2097 # what happens here, graph case?
2098 raise except_orm(_('Invalid Architecture!'), _("There is no view of type '%s' defined for the structure!") % view_type)
# When the root view belongs to an _inherits parent model, record it so
# field post-processing knows the base model differs from self.
2106 if parent_view_model != self._name:
2107 ctx = context.copy()
2108 ctx['base_model_name'] = parent_view_model
2111 xarch, xfields = self.__view_look_dom_arch(cr, user, result['arch'], view_id, context=ctx)
2112 result['arch'] = xarch
2113 result['fields'] = xfields
# Toolbar section: strip heavy report payloads, then collect the
# print/action/relate entries bound to this model in ir.values.
2118 for key in ('report_sxw_content', 'report_rml_content',
2119 'report_sxw', 'report_rml',
2120 'report_sxw_content_data', 'report_rml_content_data'):
2124 ir_values_obj = self.pool.get('ir.values')
2125 resprint = ir_values_obj.get(cr, user, 'action',
2126 'client_print_multi', [(self._name, False)], False,
2128 resaction = ir_values_obj.get(cr, user, 'action',
2129 'client_action_multi', [(self._name, False)], False,
2132 resrelate = ir_values_obj.get(cr, user, 'action',
2133 'client_action_relate', [(self._name, False)], False,
# 'multi' actions only make sense on list (tree) views.
2135 resaction = [clean(action) for action in resaction
2136 if view_type == 'tree' or not action[2].get('multi')]
2137 resprint = [clean(print_) for print_ in resprint
2138 if view_type == 'tree' or not print_[2].get('multi')]
2139 resrelate = map(lambda x: x[2], resrelate)
2141 for x in itertools.chain(resprint, resaction, resrelate):
2142 x['string'] = x['name']
2144 result['toolbar'] = {
2146 'action': resaction,
# Public (single-underscore) alias so subclasses/other modules can reach the
# class-private __view_look_dom_arch helper without name mangling.
2151 _view_look_dom_arch = __view_look_dom_arch
2153 def search_count(self, cr, user, args, context=None):
# Count the records matching the search domain ``args`` by delegating to
# search(count=True).
2156 res = self.search(cr, user, args, context=context, count=True)
2157 if isinstance(res, list):
# NOTE(review): the branch bodies/returns are missing from this listing
# (original line numbers jump from 2157 to 2161); presumably it returns
# len(res) for a list and res otherwise — confirm against VCS.
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
    """Search for records matching the domain ``args``.

    :param cr: database cursor
    :param user: current user id
    :param args: search domain: a list of ``('field_name', 'operator', value)``
                 tuples (empty list matches every record). Field names may
                 follow many2one relations with dot-notation
                 (e.g. ``'partner_id.country'``); operators include
                 ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of``.
                 Tuples combine with the prefix operators ``'&'`` (AND,
                 implicit default), ``'|'`` (OR) and ``'!'`` (NOT).
    :param offset: optional number of results to skip (default: 0)
    :param limit: optional maximum number of records to return (default: None)
    :param order: optional sort specification (default: ``self._order``)
    :param context: optional context arguments (lang, time zone, ...)
    :param count: if True, return only the number of matching records
                  instead of their ids (default: False)
    :return: list of matching record ids, or their count when ``count`` is set
    :raise AccessError: if the user tries to bypass read access rules
    """
    # Thin public wrapper: the private implementation also enforces
    # access rights and ir.rule record rules.
    return self._search(cr, user, args, offset=offset, limit=limit,
                        order=order, context=context, count=count)
2205 def name_get(self, cr, user, ids, context=None):
2206 """Returns the preferred display value (text representation) for the records with the
2207 given ``ids``. By default this will be the value of the ``name`` column, unless
2208 the model implements a custom behavior.
2209 Can sometimes be seen as the inverse function of :meth:`~.name_search`, but it is not
2213 :return: list of pairs ``(id,text_repr)`` for all records with the given ``ids``.
# A single id is accepted as well as a list of ids.
2217 if isinstance(ids, (int, long)):
# NOTE(review): the line wrapping the scalar into a list (likely
# ``ids = [ids]``) is missing from this listing — confirm against VCS.
2219 return [(r['id'], tools.ustr(r[self._rec_name])) for r in self.read(cr, user, ids,
2220 [self._rec_name], context, load='_classic_write')]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
    """Search records whose display name matches ``name`` under ``operator``.

    Used e.g. to offer completion suggestions from a partial value typed in
    a relational field. Roughly the inverse of :meth:`~.name_get`:
    equivalent to a :meth:`~.search` on a ``name``-based domain followed by
    a :meth:`~.name_get` on the resulting ids.

    :param str name: pattern the display name must match
    :param list args: optional extra search domain (see :meth:`~.search`)
    :param str operator: comparison operator for the name match,
                         e.g. ``'like'`` or ``'ilike'``
    :param int limit: optional maximum number of records to return
    :return: list of ``(id, text_repr)`` pairs for all matching records
    """
    # Delegate to the private implementation, which supports running the
    # name_get part under a dedicated uid.
    return self._name_search(cr, user, name=name, args=args,
                             operator=operator, context=context, limit=limit)
def name_create(self, cr, uid, name, context=None):
    """Create a record given only its display name.

    Calls :meth:`~.create` with a single value: ``name`` assigned to the
    ``_rec_name`` field. All other fields take their default values (from
    the model or the context); the usual :meth:`~.create` behavior applies,
    so this may still raise if other required fields lack defaults.

    :param name: display name of the record to create
    :return: the :meth:`~.name_get` pair for the newly-created record
    """
    new_id = self.create(cr, uid, {self._rec_name: name}, context)
    return self.name_get(cr, uid, [new_id], context)[0]
2260 # private implementation of name_search, allows passing a dedicated user for the name_get part to
2261 # solve some access rights issues
2262 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
# NOTE(review): lines 2263-2268 are missing from this listing (most likely
# the ``args``/``name`` normalization guards) — confirm against VCS.
2269 args += [(self._rec_name, operator, name)]
# The name_get step may run under a more privileged uid than the search.
2270 access_rights_uid = name_get_uid or user
2271 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
2272 res = self.name_get(cr, access_rights_uid, ids, context)
# NOTE(review): the trailing return (likely ``return res``) is truncated here.
# Read the translated labels of this model's fields for the given languages.
# NOTE(review): listing truncated — several loop headers (over langs/fields)
# and the final return are missing; annotations cover only visible lines.
2275 def read_string(self, cr, uid, id, langs, fields=None, context=None):
2278 self.pool.get('ir.translation').check_read(cr, uid)
# Default to every own and inherited field.
2280 fields = self._columns.keys() + self._inherit_fields.keys()
2281 #FIXME: collect all calls to _get_source into one SQL call.
2283 res[lang] = {'code': lang}
2285 if f in self._columns:
2286 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
2288 res[lang][f] = res_trans
# No translation found: fall back to the untranslated field label.
2290 res[lang][f] = self._columns[f].string
# Recurse into _inherits parents for the fields they own.
2291 for table in self._inherits:
2292 cols = intersect(self._inherit_fields.keys(), fields)
2293 res2 = self.pool.get(table).read_string(cr, uid, id, langs, cols, context)
2296 res[lang]['code'] = lang
2297 for f in res2[lang]:
2298 res[lang][f] = res2[lang][f]
# Write translated labels for this model's fields in the given languages.
# NOTE(review): listing truncated — the loops over ``langs``/``vals`` (lines
# 2304-2305, 2311) are missing; annotations cover only visible lines.
2301 def write_string(self, cr, uid, id, langs, vals, context=None):
2302 self.pool.get('ir.translation').check_write(cr, uid)
2303 #FIXME: try to only call the translation in one SQL
2306 if field in self._columns:
# The untranslated label is stored as the translation source.
2307 src = self._columns[field].string
2308 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
# Propagate to _inherits parents for the fields they own.
2309 for table in self._inherits:
2310 cols = intersect(self._inherit_fields.keys(), vals)
2312 self.pool.get(table).write_string(cr, uid, id, langs, vals, context)
# Complete ``values`` with default values for every field not provided,
# skipping fields of _inherits parents whose link field is already set.
2315 def _add_missing_default_values(self, cr, uid, values, context=None):
2316 missing_defaults = []
2317 avoid_tables = [] # avoid overriding inherited values when parent is set
2318 for tables, parent_field in self._inherits.items():
2319 if parent_field in values:
2320 avoid_tables.append(tables)
# Own columns without a provided value need a default.
2321 for field in self._columns.keys():
2322 if not field in values:
2323 missing_defaults.append(field)
# Same for inherited fields, unless their parent record is supplied.
2324 for field in self._inherit_fields.keys():
2325 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
2326 missing_defaults.append(field)
2328 if len(missing_defaults):
2329 # override defaults with the provided values, never allow the other way around
2330 defaults = self.default_get(cr, uid, missing_defaults, context)
# NOTE(review): the loop header (likely ``for dv in defaults:``, line 2331)
# is missing from this listing — confirm against VCS.
# m2m defaults given as plain id lists are normalized to (6, 0, ids).
2332 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
2333 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
2334 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
2335 defaults[dv] = [(6, 0, defaults[dv])]
# o2m defaults given as dicts are normalized to (0, 0, vals) commands.
2336 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
2337 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
2338 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
2339 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
2340 defaults.update(values)
# NOTE(review): the trailing return (likely ``return defaults``) is truncated.
2344 def clear_caches(self):
2345 """ Clear the caches
2347 This clears the caches associated to methods decorated with
2348 ``tools.ormcache`` or ``tools.ormcache_multi``.
# NOTE(review): listing truncated — the docstring closer and the try/clear
# statements are missing; only the attribute probe and its handler remain.
2351 getattr(self, '_ormcache')
2353 except AttributeError:
2356 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
2358 Get the list of records in list view grouped by the given ``groupby`` fields
2360 :param cr: database cursor
2361 :param uid: current user id
2362 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2363 :param list fields: list of fields present in the list view specified on the object
2364 :param list groupby: fields by which the records will be grouped
2365 :param int offset: optional number of records to skip
2366 :param int limit: optional max number of records to return
2367 :param dict context: context arguments, like lang, time zone
2368 :param list orderby: optional ``order by`` specification, for
2369 overriding the natural sort ordering of the
2370 groups, see also :py:meth:`~osv.osv.osv.search`
2371 (supported only for many2one fields currently)
2372 :return: list of dictionaries(one dictionary for each record) containing:
2374 * the values of fields grouped by the fields in ``groupby`` argument
2375 * __domain: list of tuples specifying the search criteria
2376 * __context: dictionary with argument like ``groupby``
2377 :rtype: [{'field_name_1': value, ...]
2378 :raise AccessError: * if user has no read rights on the requested object
2379 * if user tries to bypass access rules for read on the requested object
# NOTE(review): this listing is truncated (original line numbers skip);
# annotations below describe only the statements that remain visible.
2382 context = context or {}
2383 self.check_read(cr, uid)
2385 fields = self._columns.keys()
# Build the WHERE clause from the domain, then apply record rules.
2387 query = self._where_calc(cr, uid, domain, context=context)
2388 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2390 # Take care of adding join(s) if groupby is an '_inherits'ed field
2391 groupby_list = groupby
2392 qualified_groupby_field = groupby
2394 if isinstance(groupby, list):
# Only single-level grouping is performed per call; deeper levels are
# delegated to the client via __context['group_by'] below.
2395 groupby = groupby[0]
2396 qualified_groupby_field = self._inherits_join_calc(groupby, query)
2399 assert not groupby or groupby in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2400 groupby_def = self._columns.get(groupby) or (self._inherit_fields.get(groupby) and self._inherit_fields.get(groupby)[2])
2401 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2403 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
2404 fget = self.fields_get(cr, uid, fields)
2405 float_int_fields = filter(lambda x: fget[x]['type'] in ('float', 'integer'), fields)
2407 group_count = group_by = groupby
2409 if fget.get(groupby):
# date/datetime group by calendar month.
2410 if fget[groupby]['type'] in ('date', 'datetime'):
2411 flist = "to_char(%s,'yyyy-mm') as %s " % (qualified_groupby_field, groupby)
2412 groupby = "to_char(%s,'yyyy-mm')" % (qualified_groupby_field)
2413 qualified_groupby_field = groupby
2415 flist = qualified_groupby_field
2417 # Don't allow arbitrary values, as this would be a SQL injection vector!
2418 raise except_orm(_('Invalid group_by'),
2419 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(groupby,))
# Aggregate every numeric, database-stored field (per its group_operator).
2422 fields_pre = [f for f in float_int_fields if
2423 f == self.CONCURRENCY_CHECK_FIELD
2424 or (f in self._columns and getattr(self._columns[f], '_classic_write'))]
2425 for f in fields_pre:
2426 if f not in ['id', 'sequence']:
2427 group_operator = fget[f].get('group_operator', 'sum')
2430 qualified_field = '"%s"."%s"' % (self._table, f)
2431 flist += "%s(%s) AS %s" % (group_operator, qualified_field, f)
2433 gb = groupby and (' GROUP BY ' + qualified_groupby_field) or ''
2435 from_clause, where_clause, where_clause_params = query.get_sql()
2436 where_clause = where_clause and ' WHERE ' + where_clause
2437 limit_str = limit and ' limit %d' % limit or ''
2438 offset_str = offset and ' offset %d' % offset or ''
2439 if len(groupby_list) < 2 and context.get('group_by_no_leaf'):
# SQL is assembled by concatenation, but only from identifiers validated
# above (groupby checked against model fields); values go through params.
2441 cr.execute('SELECT min(%s.id) AS id, count(%s.id) AS %s_count' % (self._table, self._table, group_count) + (flist and ',') + flist + ' FROM ' + from_clause + where_clause + gb + limit_str + offset_str, where_clause_params)
2444 for r in cr.dictfetchall():
# Normalize SQL NULLs to False, per ORM convention.
2445 for fld, val in r.items():
2446 if val == None: r[fld] = False
2447 alldata[r['id']] = r
# Re-run a search on the representative ids to get rule-compliant ordering.
2450 data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=orderby or groupby, context=context)
2451 # the IDS of records that have groupby field value = False or '' should be sorted too
2452 data_ids += filter(lambda x:x not in data_ids, alldata.keys())
2453 data = self.read(cr, uid, data_ids, groupby and [groupby] or ['id'], context=context)
2454 # restore order of the search as read() uses the default _order (this is only for groups, so the size of data_read shoud be small):
2455 data.sort(lambda x,y: cmp(data_ids.index(x['id']), data_ids.index(y['id'])))
# Attach the drill-down domain and context of each group.
2459 d['__domain'] = [(groupby, '=', alldata[d['id']][groupby] or False)] + domain
2460 if not isinstance(groupby_list, (str, unicode)):
2461 if groupby or not context.get('group_by_no_leaf', False):
2462 d['__context'] = {'group_by': groupby_list[1:]}
2463 if groupby and groupby in fget:
# Month groups: render a human label and a [first day, last day] domain.
2464 if d[groupby] and fget[groupby]['type'] in ('date', 'datetime'):
2465 dt = datetime.datetime.strptime(alldata[d['id']][groupby][:7], '%Y-%m')
2466 days = calendar.monthrange(dt.year, dt.month)[1]
2468 d[groupby] = datetime.datetime.strptime(d[groupby][:10], '%Y-%m-%d').strftime('%B %Y')
2469 d['__domain'] = [(groupby, '>=', alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-01', '%Y-%m-%d').strftime('%Y-%m-%d') or False),\
2470 (groupby, '<=', alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-' + str(days), '%Y-%m-%d').strftime('%Y-%m-%d') or False)] + domain
2471 del alldata[d['id']][groupby]
2472 d.update(alldata[d['id']])
2476 def _inherits_join_add(self, current_table, parent_model_name, query):
2478 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2479 :param current_table: current model object
2480 :param parent_model_name: name of the parent model for which the clauses should be added
2481 :param query: query object on which the JOIN should be added
2483 inherits_field = current_table._inherits[parent_model_name]
2484 parent_model = self.pool.get(parent_model_name)
2485 parent_table_name = parent_model._table
2486 quoted_parent_table_name = '"%s"' % parent_table_name
2487 if quoted_parent_table_name not in query.tables:
2488 query.tables.append(quoted_parent_table_name)
2489 query.where_clause.append('(%s.%s = %s.id)' % (current_table._table, inherits_field, parent_table_name))
2493 def _inherits_join_calc(self, field, query):
2495 Adds missing table select and join clause(s) to ``query`` for reaching
2496 the field coming from an '_inherits' parent table (no duplicates).
2498 :param field: name of inherited field to reach
2499 :param query: query object on which the JOIN should be added
2500 :return: qualified name of field, to be used in SELECT clause
2502 current_table = self
2503 while field in current_table._inherit_fields and not field in current_table._columns:
2504 parent_model_name = current_table._inherit_fields[field][0]
2505 parent_table = self.pool.get(parent_model_name)
2506 self._inherits_join_add(current_table, parent_model_name, query)
2507 current_table = parent_table
2508 return '"%s".%s' % (current_table._table, field)
# Recompute the parent_left/parent_right nested-set columns for the whole
# table (used to answer child_of queries without recursion).
# NOTE(review): listing truncated — several lines of both the helper and the
# driver loop are missing; annotations cover only visible statements.
2510 def _parent_store_compute(self, cr):
2511 if not self._parent_store:
2513 logger = netsvc.Logger()
2514 logger.notifyChannel('data', netsvc.LOG_INFO, 'Computing parent left and right for table %s...' % (self._table, ))
# Depth-first numbering: each node gets [pos, pos2] spanning its subtree.
2515 def browse_rec(root, pos=0):
2517 where = self._parent_name+'='+str(root)
2519 where = self._parent_name+' IS NULL'
2520 if self._parent_order:
2521 where += ' order by '+self._parent_order
2522 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2524 for id in cr.fetchall():
2525 pos2 = browse_rec(id[0], pos2)
2526 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
# Roots are the records with no parent; number each root subtree in turn.
2528 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2529 if self._parent_order:
2530 query += ' order by ' + self._parent_order
2533 for (root,) in cr.fetchall():
2534 pos = browse_rec(root, pos)
# Recompute and persist the stored values of fields.function column ``k``
# (function object ``f``) for every record of the table.
# NOTE(review): listing truncated — the batching ``while`` header and some
# inner lines are missing; annotations cover only visible statements.
2537 def _update_store(self, cr, f, k):
2538 logger = netsvc.Logger()
2539 logger.notifyChannel('data', netsvc.LOG_INFO, "storing computed values of fields.function '%s'" % (k,))
# _symbol_set = (sql_placeholder, python->sql adapter) for this column.
2540 ss = self._columns[k]._symbol_set
2541 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2542 cr.execute('select id from '+self._table)
2543 ids_lst = map(lambda x: x[0], cr.fetchall())
# Processed in chunks of 40 ids (the chunk is assigned to ``iids``).
2546 ids_lst = ids_lst[40:]
2547 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2548 for key, val in res.items():
2551 # if val is a many2one, just write the ID
2552 if type(val) == tuple:
# Skip plain False (i.e. "no value"); any other value is written.
2554 if (val<>False) or (type(val)<>bool):
2555 cr.execute(update_query, (ss[1](val), key))
2557 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2558 """Raise except_orm if value is not among the valid values for the selection field"""
# NOTE(review): listing truncated — some guard/early-return lines are
# missing; annotations cover only visible statements.
# reference values look like 'model.name,id': validate the id part.
2559 if self._columns[field]._type == 'reference':
2560 val_model, val_id_str = value.split(',', 1)
2563 val_id = long(val_id_str)
2567 raise except_orm(_('ValidateError'),
2568 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
# selection may be a static list of pairs or a callable returning one.
2572 if isinstance(self._columns[field].selection, (tuple, list)):
2573 if val in dict(self._columns[field].selection):
2575 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2577 raise except_orm(_('ValidateError'),
2578 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
2580 def _check_removed_columns(self, cr, log=False):
2581 # iterate on the database columns to drop the NOT NULL constraints
2582 # of fields which were required but have been removed (or will be added by another module)
# Keep every stored column currently declared on the model...
2583 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2584 columns += MAGIC_COLUMNS
# ...then look up database columns NOT in that set (catalog query over
# pg_class/pg_attribute, excluding dropped and system-typed attributes).
2585 cr.execute("SELECT a.attname, a.attnotnull"
2586 " FROM pg_class c, pg_attribute a"
2587 " WHERE c.relname=%s"
2588 " AND c.oid=a.attrelid"
2589 " AND a.attisdropped=%s"
2590 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2591 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2593 for column in cr.dictfetchall():
2595 self.__logger.debug("column %s is in the table %s but not in the corresponding object %s",
2596 column['attname'], self._table, self._name)
# Drop NOT NULL on orphaned columns so writes on the model keep working.
2597 if column['attnotnull']:
2598 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2599 self.__schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2600 self._table, column['attname'])
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
    """Queue a foreign key for a many2one field declared directly on ``self``.

    Validates the transient/persistent combination, then records the FK in
    ``self._foreign_keys`` (created later by the schema machinery) and logs it.

    :param source_field: column name on ``self._table`` holding the m2o
    :param dest_model: target model object of the relationship
    :param ondelete: requested ON DELETE action, or a falsy value to use the
                     default ('cascade' for transient→regular, else 'set null')
    """
    assert self.is_transient() or not dest_model.is_transient(), \
        'Many2One relationships from non-transient Model to TransientModel are forbidden'
    if self.is_transient() and not dest_model.is_transient():
        # TransientModel relationships to regular Models are annoying
        # usually because they could block deletion due to the FKs.
        # So unless stated otherwise we default them to ondelete=cascade.
        ondelete = ondelete or 'cascade'
    fk = (self._table, source_field, dest_model._table, ondelete or 'set null')
    self._foreign_keys.append(fk)
    self.__schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
        self._table, source_field, dest_model._table, ondelete)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
    """Queue a foreign key from an arbitrary table, without transient checks.

    :param source_table: table holding the referencing column
    :param source_field: referencing column name
    :param dest_model: target model object of the relationship
    :param ondelete: ON DELETE action; falsy values default to 'set null'
    """
    action = ondelete or 'set null'
    self._foreign_keys.append((source_table, source_field, dest_model._table, action))
    self.__schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
        source_table, source_field, dest_model._table, ondelete)
2621 def _auto_init(self, cr, context=None):
2624 Call _field_create and, unless _auto is False:
2626 - create the corresponding table in database for the model,
2627 - possibly add the parent columns in database,
2628 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2629 'write_date' in database if _log_access is True (the default),
2630 - report on database columns no more existing in _columns,
2631 - remove no more existing not null constraints,
2632 - alter existing database columns to match _columns,
2633 - create database tables to match _columns,
2634 - add database indices to match _columns,
2635 - save in self._foreign_keys a list a foreign keys to create (see
2639 self._foreign_keys = []
2640 raise_on_invalid_object_name(self._name)
2643 store_compute = False
2645 update_custom_fields = context.get('update_custom_fields', False)
2646 self._field_create(cr, context=context)
2647 create = not self._table_exist(cr)
2649 if getattr(self, '_auto', True):
2652 self._create_table(cr)
2655 if self._parent_store:
2656 if not self._parent_columns_exist(cr):
2657 self._create_parent_columns(cr)
2658 store_compute = True
2660 # Create the create_uid, create_date, write_uid, write_date, columns if desired.
2661 if self._log_access:
2662 self._add_log_columns(cr)
2664 self._check_removed_columns(cr, log=False)
2666 # iterate on the "object columns"
2667 column_data = self._select_column_data(cr)
2669 for k, f in self._columns.iteritems():
2670 if k in MAGIC_COLUMNS:
2672 # Don't update custom (also called manual) fields
2673 if f.manual and not update_custom_fields:
2676 if isinstance(f, fields.one2many):
2677 self._o2m_raise_on_missing_reference(cr, f)
2679 elif isinstance(f, fields.many2many):
2680 self._m2m_raise_or_create_relation(cr, f)
2683 res = column_data.get(k)
2685 # The field is not found as-is in database, try if it
2686 # exists with an old name.
2687 if not res and hasattr(f, 'oldname'):
2688 res = column_data.get(f.oldname)
2690 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2692 column_data[k] = res
2693 self.__schema.debug("Table '%s': renamed column '%s' to '%s'",
2694 self._table, f.oldname, k)
2696 # The field already exists in database. Possibly
2697 # change its type, rename it, drop it or change its
2700 f_pg_type = res['typname']
2701 f_pg_size = res['size']
2702 f_pg_notnull = res['attnotnull']
2703 if isinstance(f, fields.function) and not f.store and\
2704 not getattr(f, 'nodrop', False):
2705 self.__logger.info('column %s (%s) in table %s removed: converted to a function !\n',
2706 k, f.string, self._table)
2707 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2709 self.__schema.debug("Table '%s': dropped column '%s' with cascade",
2713 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2718 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2719 ('varchar', 'text', 'TEXT', ''),
2720 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2721 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2722 ('timestamp', 'date', 'date', '::date'),
2723 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2724 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2726 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size < f.size:
2727 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2728 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2729 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2730 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2732 self.__schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2733 self._table, k, f_pg_size, f.size)
2735 if (f_pg_type==c[0]) and (f._type==c[1]):
2736 if f_pg_type != f_obj_type:
2738 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2739 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2740 cr.execute(('UPDATE "%s" SET "%s"=temp_change_size'+c[3]) % (self._table, k))
2741 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2743 self.__schema.debug("Table '%s': column '%s' changed type from %s to %s",
2744 self._table, k, c[0], c[1])
2747 if f_pg_type != f_obj_type:
2751 newname = k + '_moved' + str(i)
2752 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2753 "WHERE c.relname=%s " \
2754 "AND a.attname=%s " \
2755 "AND c.oid=a.attrelid ", (self._table, newname))
2756 if not cr.fetchone()[0]:
2760 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2761 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2762 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2763 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2764 self.__schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2765 self._table, k, f_pg_type, f._type, newname)
2767 # if the field is required and hasn't got a NOT NULL constraint
2768 if f.required and f_pg_notnull == 0:
2769 # set the field to the default value if any
2770 if k in self._defaults:
2771 if callable(self._defaults[k]):
2772 default = self._defaults[k](self, cr, SUPERUSER_ID, context)
2774 default = self._defaults[k]
2776 if (default is not None):
2777 ss = self._columns[k]._symbol_set
2778 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k)
2779 cr.execute(query, (ss[1](default),))
2780 # add the NOT NULL constraint
2783 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2785 self.__schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2788 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2789 "If you want to have it, you should update the records and execute manually:\n"\
2790 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2791 self.__schema.warn(msg, self._table, k, self._table, k)
2793 elif not f.required and f_pg_notnull == 1:
2794 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2796 self.__schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2799 indexname = '%s_%s_index' % (self._table, k)
2800 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2801 res2 = cr.dictfetchall()
2802 if not res2 and f.select:
2803 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2805 if f._type == 'text':
2806 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2807 msg = "Table '%s': Adding (b-tree) index for text column '%s'."\
2808 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2809 " because there is a length limit for indexable btree values!\n"\
2810 "Use a search view instead if you simply want to make the field searchable."
2811 self.__schema.warn(msg, self._table, k, f._type)
2812 if res2 and not f.select:
2813 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2815 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2816 self.__schema.debug(msg, self._table, k, f._type)
2818 if isinstance(f, fields.many2one):
2819 dest_model = self.pool.get(f._obj)
2820 ref = dest_model._table
2821 if ref != 'ir_actions':
2822 cr.execute('SELECT confdeltype, conname FROM pg_constraint as con, pg_class as cl1, pg_class as cl2, '
2823 'pg_attribute as att1, pg_attribute as att2 '
2824 'WHERE con.conrelid = cl1.oid '
2825 'AND cl1.relname = %s '
2826 'AND con.confrelid = cl2.oid '
2827 'AND cl2.relname = %s '
2828 'AND array_lower(con.conkey, 1) = 1 '
2829 'AND con.conkey[1] = att1.attnum '
2830 'AND att1.attrelid = cl1.oid '
2831 'AND att1.attname = %s '
2832 'AND array_lower(con.confkey, 1) = 1 '
2833 'AND con.confkey[1] = att2.attnum '
2834 'AND att2.attrelid = cl2.oid '
2835 'AND att2.attname = %s '
2836 "AND con.contype = 'f'", (self._table, ref, k, 'id'))
2837 res2 = cr.dictfetchall()
2839 if res2[0]['confdeltype'] != POSTGRES_CONFDELTYPES.get((f.ondelete or 'set null').upper(), 'a'):
2840 cr.execute('ALTER TABLE "' + self._table + '" DROP CONSTRAINT "' + res2[0]['conname'] + '"')
2841 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2843 self.__schema.debug("Table '%s': column '%s': XXX",
2846 # The field doesn't exist in database. Create it if necessary.
2848 if not isinstance(f, fields.function) or f.store:
2849 # add the missing field
2850 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2851 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2852 self.__schema.debug("Table '%s': added column '%s' with definition=%s",
2853 self._table, k, get_pg_type(f)[1])
2856 if not create and k in self._defaults:
2857 if callable(self._defaults[k]):
2858 default = self._defaults[k](self, cr, SUPERUSER_ID, context)
2860 default = self._defaults[k]
2862 ss = self._columns[k]._symbol_set
2863 query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
2864 cr.execute(query, (ss[1](default),))
2866 netsvc.Logger().notifyChannel('data', netsvc.LOG_DEBUG, "Table '%s': setting default value of new column %s" % (self._table, k))
2868 # remember the functions to call for the stored fields
2869 if isinstance(f, fields.function):
2871 if f.store is not True: # i.e. if f.store is a dict
2872 order = f.store[f.store.keys()[0]][2]
2873 todo_end.append((order, self._update_store, (f, k)))
2875 # and add constraints if needed
2876 if isinstance(f, fields.many2one):
2877 if not self.pool.get(f._obj):
2878 raise except_orm('Programming Error', ('There is no reference available for %s') % (f._obj,))
2879 dest_model = self.pool.get(f._obj)
2880 ref = dest_model._table
2881 # ir_actions is inherited so foreign key doesn't work on it
2882 if ref != 'ir_actions':
2883 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2885 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2889 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2890 self.__schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2893 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2894 "Try to re-run: openerp-server --update=module\n"\
2895 "If it doesn't work, update records and execute manually:\n"\
2896 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2897 self.__logger.warn(msg, k, self._table, self._table, k)
2901 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2902 create = not bool(cr.fetchone())
2904 cr.commit() # start a new transaction
2906 self._add_sql_constraints(cr)
2909 self._execute_sql(cr)
2912 self._parent_store_compute(cr)
    def _auto_end(self, cr, context=None):
        """ Create the foreign keys recorded by _auto_init.

        :param cr: database cursor
        :param context: unused here; kept for API uniformity
        """
        # Each queued entry is (table, column, referenced table, ondelete action),
        # collected during the schema-update pass so all FKs are added at the end.
        for t, k, r, d in self._foreign_keys:
            cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
        # The queue is one-shot: drop it so a later call cannot replay stale entries.
        del self._foreign_keys
    def _table_exist(self, cr):
        """Query the PostgreSQL catalog for a table or view backing this model's _table."""
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2931 def _create_table(self, cr):
2932 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
2933 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2934 self.__schema.debug("Table '%s': created", self._table)
    def _parent_columns_exist(self, cr):
        """Check the catalog for an existing ``parent_left`` column on this
        model's table (the hierarchy columns used by the parent-store code)."""
        cr.execute("""SELECT c.relname
            FROM pg_class c, pg_attribute a
            WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
            """, (self._table, 'parent_left'))
    def _create_parent_columns(self, cr):
        """Add the parent_left/parent_right INTEGER columns used by the
        hierarchy (parent-store) machinery, and complain loudly when the model
        definition does not declare them, or declares them without an index."""
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
        if 'parent_left' not in self._columns:
            # Model lacks an explicit parent_left field: tell the developer how to add it.
            self.__logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
            self.__schema.debug("Table '%s': added column '%s' with definition=%s",
                                self._table, 'parent_left', 'INTEGER')
        elif not self._columns['parent_left'].select:
            # parent_left is queried heavily, so it must be indexed (select=1).
            self.__logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
        if 'parent_right' not in self._columns:
            self.__logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
            self.__schema.debug("Table '%s': added column '%s' with definition=%s",
                                self._table, 'parent_right', 'INTEGER')
        elif not self._columns['parent_right'].select:
            self.__logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
        if self._columns[self._parent_name].ondelete != 'cascade':
            # The parent link must cascade-delete or subtrees would be orphaned.
            self.__logger.error("The column %s on object %s must be set as ondelete='cascade'",
                                self._parent_name, self._name)
    def _add_log_columns(self, cr):
        """Ensure the audit-trail columns from LOG_ACCESS_COLUMNS exist on the
        model's table, adding any that are missing with their SQL definition."""
        for field, field_def in LOG_ACCESS_COLUMNS.iteritems():
                FROM pg_class c, pg_attribute a
                WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
                """, (self._table, field))
            # Column is missing: add it with its canonical SQL definition.
            cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
            self.__schema.debug("Table '%s': added column '%s' with definition=%s",
                                self._table, field, field_def)
2985 def _select_column_data(self, cr):
2986 # attlen is the number of bytes necessary to represent the type when
2987 # the type has a fixed size. If the type has a varying size attlen is
2988 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2989 # Thus the query can return a negative size for a unlimited varchar.
2990 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size " \
2991 "FROM pg_class c,pg_attribute a,pg_type t " \
2992 "WHERE c.relname=%s " \
2993 "AND c.oid=a.attrelid " \
2994 "AND a.atttypid=t.oid", (self._table,))
2995 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
    def _o2m_raise_on_missing_reference(self, cr, f):
        """Raise a Programming Error when the one2many field ``f`` points at an
        inverse field (f._fields_id) that does not exist on the target model."""
        # TODO this check should be a method on fields.one2many.
        other = self.pool.get(f._obj)
        # TODO the condition could use fields_get_keys().
        if f._fields_id not in other._columns.keys():
            # The inverse column may also be provided through _inherits.
            if f._fields_id not in other._inherit_fields.keys():
                raise except_orm('Programming Error', ("There is no reference field '%s' found for '%s'") % (f._fields_id, f._obj,))
    def _m2m_raise_or_create_relation(self, cr, f):
        """Create the SQL relation table for many2many field ``f`` if it does
        not exist yet: two NOT NULL integer columns with a UNIQUE pair
        constraint, cascade foreign keys (skipped when a side is an SQL view),
        and one index per column. Raises when the destination model is unknown."""
        m2m_tbl, col1, col2 = f._sql_names(self)
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
        if not cr.dictfetchall():
            if not self.pool.get(f._obj):
                raise except_orm('Programming Error', ('Many2Many destination model does not exist: `%s`') % (f._obj,))
            dest_model = self.pool.get(f._obj)
            ref = dest_model._table
            cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s")) WITH OIDS' % (m2m_tbl, col1, col2, col1, col2))
            # create foreign key references with ondelete=cascade, unless the targets are SQL views
            cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
            if not cr.fetchall():
                self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
            cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
            if not cr.fetchall():
                self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
            # Both link columns get their own index: m2m lookups filter on either side.
            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
            cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
            self.__schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
    def _add_sql_constraints(self, cr):
        """
        Modify this model's database table constraints so they match the one in
        _sql_constraints.
        """
        for (key, con, _) in self._sql_constraints:
            conname = '%s_%s' % (self._table, key)

            # Look up the constraint (if any) and its current SQL definition.
            cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
            existing_constraints = cr.dictfetchall()

                    # DROP action: used when the constraint definition changed.
                    'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
                    'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
                        self._table, conname, con),
                    'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
                    # ADD action: used when the constraint is missing or was just dropped.
                    'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
                    'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
                    'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
            if not existing_constraints:
                # constraint does not exists:
                sql_actions['add']['execute'] = True
                sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
            elif con.lower() not in [item['condef'].lower() for item in existing_constraints]:
                # constraint exists but its definition has changed:
                sql_actions['drop']['execute'] = True
                sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
                sql_actions['add']['execute'] = True
                sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
                # we need to add the constraint:
            # Run the selected actions in their declared order (drop before add).
            sql_actions = [item for item in sql_actions.values()]
            sql_actions.sort(key=lambda x: x['order'])
            for sql_action in [action for action in sql_actions if action['execute']]:
                    cr.execute(sql_action['query'])
                    self.__schema.debug(sql_action['msg_ok'])
                    self.__schema.warn(sql_action['msg_err'])
    def _execute_sql(self, cr):
        """ Execute the SQL code from the _sql attribute (if any)."""
        if hasattr(self, "_sql"):
            # _sql is a single string of statements separated by ';';
            # newlines are stripped before each statement is considered.
            for line in self._sql.split(';'):
                line2 = line.replace('\n', '').strip()
    # Update objects that use this one to update their _inherits fields
3102 def _inherits_reload_src(self):
3103 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
3104 for obj in self.pool.models.values():
3105 if self._name in obj._inherits:
3106 obj._inherits_reload()
    def _inherits_reload(self):
        """ Recompute the _inherit_fields mapping.

        This will also call itself on each inherits'd child model.
        """
        for table in self._inherits:
            other = self.pool.get(table)
            # Direct columns of the parent: the fourth tuple item is the model
            # that actually owns the column.
            for col in other._columns.keys():
                res[col] = (table, self._inherits[table], other._columns[col], table)
            # Transitively inherited fields keep their original owner model.
            for col in other._inherit_fields.keys():
                res[col] = (table, self._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
        self._inherit_fields = res
        self._all_columns = self._get_column_infos()
        # Propagate: models inheriting from this one must recompute as well.
        self._inherits_reload_src()
    def _get_column_infos(self):
        """Returns a dict mapping all fields names (direct fields and
        inherited field via _inherits) to a ``column_info`` struct
        giving detailed columns """
        # Inherited fields first ...
        for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
            result[k] = fields.column_info(k, col, parent, m2o, original_parent)
        # ... then local columns, which take precedence on name clashes.
        for k, col in self._columns.iteritems():
            result[k] = fields.column_info(k, col)
3139 def _inherits_check(self):
3140 for table, field_name in self._inherits.items():
3141 if field_name not in self._columns:
3142 logging.getLogger('init').info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.' % (field_name, self._name))
3143 self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
3144 required=True, ondelete="cascade")
3145 elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() != "cascade":
3146 logging.getLogger('init').warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.' % (field_name, self._name))
3147 self._columns[field_name].required = True
3148 self._columns[field_name].ondelete = "cascade"
3150 #def __getattr__(self, name):
3152 # Proxies attribute accesses to the `inherits` parent so we can call methods defined on the inherited parent
3153 # (though inherits doesn't use Python inheritance).
3154 # Handles translating between local ids and remote ids.
3155 # Known issue: doesn't work correctly when using python's own super(), don't involve inherit-based inheritance
3156 # when you have inherits.
3158 # for model, field in self._inherits.iteritems():
3159 # proxy = self.pool.get(model)
3160 # if hasattr(proxy, name):
3161 # attribute = getattr(proxy, name)
3162 # if not hasattr(attribute, '__call__'):
3166 # return super(orm, self).__getattr__(name)
3168 # def _proxy(cr, uid, ids, *args, **kwargs):
3169 # objects = self.browse(cr, uid, ids, kwargs.get('context', None))
3170 # lst = [obj[field].id for obj in objects if obj[field]]
3171 # return getattr(proxy, name)(cr, uid, lst, *args, **kwargs)
    def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
        """ Return the definition of each field.

        The returned value is a dictionary (indiced by field name) of
        dictionaries. The _inherits'd fields are included. The string, help,
        and selection (if present) attributes are translated.

        :param cr: database cursor
        :param user: current user id
        :param allfields: optional list of field names to restrict the result to
        :param context: context arguments, like lang, time zone
        :return: dictionary of field dictionaries, each one describing a field of the business object
        :raise AccessError: * if user has no create/write rights on the requested object
        """
        # Without write or create access, every field is reported as read-only below.
        write_access = self.check_write(cr, user, False) or \
           self.check_create(cr, user, False)

        translation_obj = self.pool.get('ir.translation')
        # Start from the parents' definitions so local fields can override them.
        for parent in self._inherits:
            res.update(self.pool.get(parent).fields_get(cr, user, allfields, context))

        for f, field in self._columns.iteritems():
            if allfields and f not in allfields:

            res[f] = fields.field_to_dict(self, cr, user, field, context=context)

            if not write_access:
                res[f]['readonly'] = True
                res[f]['states'] = {}

            # Translate label, tooltip and selection values into the context language.
            if 'string' in res[f]:
                res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context.get('lang', False) or 'en_US')
                    res[f]['string'] = res_trans
            if 'help' in res[f]:
                help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context.get('lang', False) or 'en_US')
                    res[f]['help'] = help_trans
            if 'selection' in res[f]:
                # Only static selections (lists/tuples) are translated here.
                if isinstance(field.selection, (tuple, list)):
                    sel = field.selection
                    for key, val in sel:
                        val2 = translation_obj._get_source(cr, user, self._name + ',' + f, 'selection', context.get('lang', False) or 'en_US', val)
                        sel2.append((key, val2 or val))
                    res[f]['selection'] = sel2
    def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
        """ Read records with given ids with the given fields

        :param cr: database cursor
        :param user: current user id
        :param ids: id or list of the ids of the records to read
        :param fields: optional list of field names to return (default: all fields would be returned)
        :type fields: list (example ['field_name_1', ...])
        :param context: optional context dictionary - it may contains keys for specifying certain options
                        like ``context_lang``, ``context_tz`` to alter the results of the call.
                        A special ``bin_size`` boolean flag may also be passed in the context to request the
                        value of all fields.binary columns to be returned as the size of the binary instead of its
                        contents. This can also be selectively overriden by passing a field-specific flag
                        in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
                        Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
        :return: list of dictionaries((dictionary per record asked)) with requested field values
        :rtype: [{'name_of_the_field': value, ...}, ...]
        :raise AccessError: * if user has no read rights on the requested object
                            * if user tries to bypass access rules for read on the requested object
        """
        self.check_read(cr, user)
            # No explicit field list: read every direct and inherited field.
            fields = list(set(self._columns.keys() + self._inherit_fields.keys()))
        if isinstance(ids, (int, long)):
        # Normalize browse-style dict entries down to plain ids.
        select = map(lambda x: isinstance(x, dict) and x['id'] or x, select)
        result = self._read_flat(cr, user, select, fields, context, load)
            for key, v in r.items():
        # A scalar id in means a single record dict out (or False when absent).
        if isinstance(ids, (int, long, dict)):
            return result and result[0] or False
    def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
        """Low-level worker for :meth:`read`: fetches the classic-storage
        columns with SQL (ir.rule clauses included), then fills in translated
        values, _inherits'd fields, function fields, and finally masks fields
        the user's groups may not see."""
        if fields_to_read == None:
            fields_to_read = self._columns.keys()

        # Construct a clause for the security rules.
        # 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
        # or will at least contain self._table.
        rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)

        # all inherited fields + all non inherited fields for which the attribute whose name is in load is True
        fields_pre = [f for f in fields_to_read if
                           f == self.CONCURRENCY_CHECK_FIELD
                        or (f in self._columns and getattr(self._columns[f], '_classic_write'))
                     ] + self._inherits.values()

        def convert_field(f):
            # Render one field as a SELECT expression (dates truncated, binary
            # possibly replaced by its length when bin_size is requested).
            f_qual = '%s."%s"' % (self._table, f) # need fully-qualified references in case len(tables) > 1
            if f in ('create_date', 'write_date'):
                return "date_trunc('second', %s) as %s" % (f_qual, f)
            if f == self.CONCURRENCY_CHECK_FIELD:
                if self._log_access:
                    return "COALESCE(%s.write_date, %s.create_date, now())::timestamp AS %s" % (self._table, self._table, f,)
                return "now()::timestamp AS %s" % (f,)
            if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
                return 'length(%s) as "%s"' % (f_qual, f)

        fields_pre2 = map(convert_field, fields_pre)
        order_by = self._parent_order or self._order
        select_fields = ','.join(fields_pre2 + [self._table + '.id'])
        query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
            query += " AND " + (' OR '.join(rule_clause))
        query += " ORDER BY " + order_by
        for sub_ids in cr.split_for_in_conditions(ids):
                cr.execute(query, [tuple(sub_ids)] + rule_params)
                # Fewer rows than ids means a rule filtered some out (or they are gone).
                if cr.rowcount != len(sub_ids):
                    raise except_orm(_('AccessError'),
                                     _('Operation prohibited by access rules, or performed on an already deleted document (Operation: read, Document type: %s).')
                                     % (self._description,))
                cr.execute(query, (tuple(sub_ids),))
            res.extend(cr.dictfetchall())
            res = map(lambda x: {'id': x}, ids)

        # Overlay translated values for translatable classic columns.
        for f in fields_pre:
            if f == self.CONCURRENCY_CHECK_FIELD:
            if self._columns[f].translate:
                ids = [x['id'] for x in res]
                #TODO: optimize out of this loop
                res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context.get('lang', False) or 'en_US', ids)
                    r[f] = res_trans.get(r['id'], False) or r[f]

        # Fetch the _inherits'd fields from each parent model through its link column.
        for table in self._inherits:
            col = self._inherits[table]
            cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns.keys()]
            res2 = self.pool.get(table).read(cr, user, [x[col] for x in res], cols, context, load)
                    if not record[col]: # if the record is deleted from _inherits table?
                        record.update(res3[record[col]])
                    if col not in fields_to_read:

        # all fields which need to be post-processed by a simple function (symbol_get)
        fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
            for f in fields_post:
                    r[f] = self._columns[f]._symbol_get(r[f])
        ids = [x['id'] for x in res]

        # all non inherited fields for which the attribute whose name is in load is False
        fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)

        # Compute POST fields
        # Function fields are grouped by their _multi key so one get() call can
        # compute several related values at once.
        for f in fields_post:
            todo.setdefault(self._columns[f]._multi, [])
            todo[self._columns[f]._multi].append(f)
        for key, val in todo.items():
                res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
                assert res2 is not None, \
                    'The function field "%s" on the "%s" model returned None\n' \
                    '(a dictionary was expected).' % (val[0], self._name)
                        if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']]) #TOCHECK : why got string instend of dict in python2.6
                        multi_fields = res2.get(record['id'],{})
                            record[pos] = multi_fields.get(pos,[])
                res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
                        record[f] = res2[record['id']]
        # Mask fields restricted by 'groups' when the user is not a member.
        for field in vals.copy():
            if field in self._columns:
                fobj = self._columns[field]
            for group in groups:
                module = group.split(".")[0]
                grp = group.split(".")[1]
                cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
                           (grp, module, 'res.groups', user))
                readonly = cr.fetchall()
                if readonly[0][0] >= 1:
                elif readonly[0][0] == 0:
                # Replace the value with a type-matching placeholder.
                if type(vals[field]) == type([]):
                elif type(vals[field]) == type(0.0):
                elif type(vals[field]) == type(''):
                    vals[field] = '=No Permission='
3432 # TODO check READ access
    def perm_read(self, cr, user, ids, context=None, details=True):
        """
        Returns some metadata about the given records.

        :param details: if True, \*_uid fields are replaced with the name of the user
        :return: list of ownership dictionaries for each requested record
        :rtype: list of dictionaries with the following keys:

                * create_uid: user who created the record
                * create_date: date when the record was created
                * write_uid: last user who changed the record
                * write_date: date of the last change to the record
                * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
        """
        uniq = isinstance(ids, (int, long))
        if self._log_access:
            fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
        quoted_table = '"%s"' % self._table
        fields_str = ",".join('%s.%s'%(quoted_table, field) for field in fields)
        # Join ir_model_data to recover the record's XML ID, when one exists.
        query = '''SELECT %s, __imd.module, __imd.name
                   FROM %s LEFT JOIN ir_model_data __imd
                       ON (__imd.model = %%s and __imd.res_id = %s.id)
                   WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
        cr.execute(query, (self._name, tuple(ids)))
        res = cr.dictfetchall()
            r[key] = r[key] or False
            if details and key in ('write_uid', 'create_uid') and r[key]:
                r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
                pass # Leave the numeric uid there
            r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
            del r['name'], r['module']
    def _check_concurrency(self, cr, ids, context):
        """Raise a ConcurrencyException when any of ``ids`` was modified in the
        database after the timestamp the client supplied under
        context[CONCURRENCY_CHECK_FIELD]. Only active when _log_access is on."""
        if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
        check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, now())::timestamp)"
        for sub_ids in cr.split_for_in_conditions(ids):
                # Client-supplied timestamps are keyed on "model_name,id";
                # pop() makes each check one-shot.
                id_ref = "%s,%s" % (self._name, id)
                update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
                    ids_to_check.extend([id, update_date])
            if not ids_to_check:
            # One OR'ed check_clause per (id, timestamp) pair in ids_to_check.
            cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
                # mention the first one only to keep the error message readable
                raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3502 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3503 """Verifies that the operation given by ``operation`` is allowed for the user
3504 according to the access rights."""
3505 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3507 def check_create(self, cr, uid, raise_exception=True):
3508 return self.check_access_rights(cr, uid, 'create', raise_exception)
3510 def check_read(self, cr, uid, raise_exception=True):
3511 return self.check_access_rights(cr, uid, 'read', raise_exception)
3513 def check_unlink(self, cr, uid, raise_exception=True):
3514 return self.check_access_rights(cr, uid, 'unlink', raise_exception)
3516 def check_write(self, cr, uid, raise_exception=True):
3517 return self.check_access_rights(cr, uid, 'write', raise_exception)
    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """Verifies that the operation given by ``operation`` is allowed for the user
        according to ir.rules.

        :param operation: one of ``write``, ``unlink``
        :raise except_orm: * if current ir.rules do not permit this operation.
        :return: None if the operation is allowed
        """
        # The super-user bypasses all record rules.
        if uid == SUPERUSER_ID:
        if self.is_transient():
            # Only one single implicit access rule for transient models: owner only!
            # This is ok to hardcode because we assert that TransientModels always
            # have log_access enabled and this the create_uid column is always there.
            # And even with _inherits, these fields are always present in the local
            # table too, so no need for JOINs.
            cr.execute("""SELECT distinct create_uid
                          WHERE id IN %%s""" % self._table, (tuple(ids),))
            uids = [x[0] for x in cr.fetchall()]
            if len(uids) != 1 or uids[0] != uid:
                raise except_orm(_('AccessError'), '%s access is '
                    'restricted to your own records for transient models '
                    '(except for the super-user).' % operation.capitalize())
            # Regular models: apply the ir.rule domain for this operation.
            where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
                where_clause = ' and ' + ' and '.join(where_clause)
                for sub_ids in cr.split_for_in_conditions(ids):
                    cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
                               ' WHERE ' + self._table + '.id IN %s' + where_clause,
                               [sub_ids] + where_params)
                    # A shortfall means a rule filtered records out (or they were deleted).
                    if cr.rowcount != len(sub_ids):
                        raise except_orm(_('AccessError'),
                                         _('Operation prohibited by access rules, or performed on an already deleted document (Operation: %s, Document type: %s).')
                                         % (operation, self._description))
    def unlink(self, cr, uid, ids, context=None):
        """
        Delete records with given ids

        :param cr: database cursor
        :param uid: current user id
        :param ids: id or list of ids
        :param context: (optional) context arguments, like lang, time zone
        :raise AccessError: * if user has no unlink rights on the requested object
                            * if user tries to bypass access rules for unlink on the requested object
        :raise UserError: if the record is default property for other records
        """
        if isinstance(ids, (int, long)):
        # Snapshot the stored-field recomputation targets BEFORE deleting rows.
        result_store = self._store_get_values(cr, uid, ids, None, context)

        self._check_concurrency(cr, ids, context)

        self.check_unlink(cr, uid)

        # Refuse to delete records still used as a default property value.
        properties = self.pool.get('ir.property')
        domain = [('res_id', '=', False),
                  ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
        if properties.search(cr, uid, domain, context=context):
            raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))

        # Drop any workflow instances attached to the records being deleted.
        wf_service = netsvc.LocalService("workflow")
            wf_service.trg_delete(uid, self._name, oid, cr)

        self.check_access_rule(cr, uid, ids, 'unlink', context=context)
        pool_model_data = self.pool.get('ir.model.data')
        ir_values_obj = self.pool.get('ir.values')
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('delete from ' + self._table + ' ' \
                       'where id IN %s', (sub_ids,))

            # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
            # as these are not connected with real database foreign keys, and would be dangling references.
            # Note: following steps performed as admin to avoid access rights restrictions, and with no context
            # to avoid possible side-effects during admin calls.
            # Step 1. Calling unlink of ir_model_data only for the affected IDS
            reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
            # Step 2. Marching towards the real deletion of referenced records
                pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)

            # For the same reason, removing the record relevant to ir_values
            ir_value_ids = ir_values_obj.search(cr, uid,
                ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
                ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)

        # Recompute stored function fields on OTHER models that referenced
        # the deleted records (skipping rows that no longer exist).
        for order, object, store_ids, fields in result_store:
            if object != self._name:
                obj = self.pool.get(object)
                cr.execute('select id from '+obj._table+' where id IN %s', (tuple(store_ids),))
                rids = map(lambda x: x[0], cr.fetchall())
                    obj._store_set_values(cr, uid, rids, fields, context)
3631 def write(self, cr, user, ids, vals, context=None):
3633 Update records with given ids with the given field values
3635 :param cr: database cursor
3636 :param user: current user id
3638 :param ids: object id or list of object ids to update according to **vals**
3639 :param vals: field values to update, e.g {'field_name': new_field_value, ...}
3640 :type vals: dictionary
3641 :param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
3642 :type context: dictionary
3644 :raise AccessError: * if user has no write rights on the requested object
3645 * if user tries to bypass access rules for write on the requested object
3646 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3647 :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3649 **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:
3651 + For a many2many field, a list of tuples is expected.
3652 Here is the list of tuple that are accepted, with the corresponding semantics ::
3654 (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
3655 (1, ID, { values }) update the linked record with id = ID (write *values* on it)
3656 (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
3657 (3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
3658 (4, ID) link to existing record with id = ID (adds a relationship)
3659 (5) unlink all (like using (3,ID) for all linked records)
3660 (6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
3663 [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
3665 + For a one2many field, a lits of tuples is expected.
3666 Here is the list of tuple that are accepted, with the corresponding semantics ::
3668 (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
3669 (1, ID, { values }) update the linked record with id = ID (write *values* on it)
3670 (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
3673 [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
3675 + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
3676 + For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)
3680 for field in vals.copy():
3682 if field in self._columns:
3683 fobj = self._columns[field]
3684 elif field in self._inherit_fields:
3685 fobj = self._inherit_fields[field][2]
3692 for group in groups:
3693 module = group.split(".")[0]
3694 grp = group.split(".")[1]
3695 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3696 (grp, module, 'res.groups', user))
3697 readonly = cr.fetchall()
3698 if readonly[0][0] >= 1:
3701 elif readonly[0][0] == 0:
3713 if isinstance(ids, (int, long)):
3716 self._check_concurrency(cr, ids, context)
3717 self.check_write(cr, user)
3719 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3721 # No direct update of parent_left/right
3722 vals.pop('parent_left', None)
3723 vals.pop('parent_right', None)
3725 parents_changed = []
3726 parent_order = self._parent_order or self._order
3727 if self._parent_store and (self._parent_name in vals):
3728 # The parent_left/right computation may take up to
3729 # 5 seconds. No need to recompute the values if the
3730 # parent is the same.
3731 # Note: to respect parent_order, nodes must be processed in
3732 # order, so ``parents_changed`` must be ordered properly.
3733 parent_val = vals[self._parent_name]
3735 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3736 (self._table, self._parent_name, self._parent_name, parent_order)
3737 cr.execute(query, (tuple(ids), parent_val))
3739 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3740 (self._table, self._parent_name, parent_order)
3741 cr.execute(query, (tuple(ids),))
3742 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3749 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3751 if field in self._columns:
3752 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3753 if (not totranslate) or not self._columns[field].translate:
3754 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3755 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3756 direct.append(field)
3758 upd_todo.append(field)
3760 updend.append(field)
3761 if field in self._columns \
3762 and hasattr(self._columns[field], 'selection') \
3764 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3766 if self._log_access:
3767 upd0.append('write_uid=%s')
3768 upd0.append('write_date=now()')
3772 self.check_access_rule(cr, user, ids, 'write', context=context)
3773 for sub_ids in cr.split_for_in_conditions(ids):
3774 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3775 'where id IN %s', upd1 + [sub_ids])
3776 if cr.rowcount != len(sub_ids):
3777 raise except_orm(_('AccessError'),
3778 _('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3783 if self._columns[f].translate:
3784 src_trans = self.pool.get(self._name).read(cr, user, ids, [f])[0][f]
3787 # Inserting value to DB
3788 self.write(cr, user, ids, {f: vals[f]})
3789 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3792 # call the 'set' method of fields which are not classic_write
3793 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3795 # default element in context must be removed when call a one2many or many2many
3796 rel_context = context.copy()
3797 for c in context.items():
3798 if c[0].startswith('default_'):
3799 del rel_context[c[0]]
3801 for field in upd_todo:
3803 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3805 for table in self._inherits:
3806 col = self._inherits[table]
3808 for sub_ids in cr.split_for_in_conditions(ids):
3809 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3810 'where id IN %s', (sub_ids,))
3811 nids.extend([x[0] for x in cr.fetchall()])
3815 if self._inherit_fields[val][0] == table:
3818 self.pool.get(table).write(cr, user, nids, v, context)
3820 self._validate(cr, user, ids, context)
3822 # TODO: use _order to set dest at the right position and not first node of parent
3823 # We can't defer parent_store computation because the stored function
3824 # fields that are computer may refer (directly or indirectly) to
3825 # parent_left/right (via a child_of domain)
3828 self.pool._init_parent[self._name] = True
3830 order = self._parent_order or self._order
3831 parent_val = vals[self._parent_name]
3833 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3835 clause, params = '%s IS NULL' % (self._parent_name,), ()
3837 for id in parents_changed:
3838 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3839 pleft, pright = cr.fetchone()
3840 distance = pright - pleft + 1
3842 # Positions of current siblings, to locate proper insertion point;
3843 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3844 # after each update, in case several nodes are sequentially inserted one
3845 # next to the other (i.e computed incrementally)
3846 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3847 parents = cr.fetchall()
3849 # Find Position of the element
3851 for (parent_pright, parent_id) in parents:
3854 position = parent_pright + 1
3856 # It's the first node of the parent
3861 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3862 position = cr.fetchone()[0] + 1
3864 if pleft < position <= pright:
3865 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3867 if pleft < position:
3868 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3869 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3870 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3872 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3873 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3874 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3876 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3880 for order, object, ids_to_update, fields_to_recompute in result:
3881 key = (object, tuple(fields_to_recompute))
3882 done.setdefault(key, {})
3883 # avoid to do several times the same computation
3885 for id in ids_to_update:
3886 if id not in done[key]:
3887 done[key][id] = True
3889 self.pool.get(object)._store_set_values(cr, user, todo, fields_to_recompute, context)
3891 wf_service = netsvc.LocalService("workflow")
3893 wf_service.trg_write(user, self._name, id, cr)
3897 # TODO: Should set perm to user.xxx
3899 def create(self, cr, user, vals, context=None):
3901 Create a new record for the model.
3903 The values for the new record are initialized using the ``vals``
3904 argument, and if necessary the result of ``default_get()``.
3906 :param cr: database cursor
3907 :param user: current user id
3909 :param vals: field values for new record, e.g {'field_name': field_value, ...}
3910 :type vals: dictionary
3911 :param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
3912 :type context: dictionary
3913 :return: id of new record created
3914 :raise AccessError: * if user has no create rights on the requested object
3915 * if user tries to bypass access rules for create on the requested object
3916 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3917 :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3919 **Note**: The type of field values to pass in ``vals`` for relationship fields is specific.
3920 Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how
# Transient models get their expired rows vacuumed opportunistically.
3927 if self.is_transient():
3928 self._transient_vacuum(cr, user)
# ACL check: user must have 'create' permission on this model.
3930 self.check_create(cr, user)
3932 vals = self._add_missing_default_values(cr, user, vals, context)
# Prepare one parent-record creation (or reuse of an existing id) per
# _inherits parent table, routing inherited field values to it.
3935 for v in self._inherits:
3936 if self._inherits[v] not in vals:
3939 tocreate[v] = {'id': vals[self._inherits[v]]}
3940 (upd0, upd1, upd2) = ('', '', [])
3942 for v in vals.keys():
3943 if v in self._inherit_fields:
3944 (table, col, col_detail, original_parent) = self._inherit_fields[v]
3945 tocreate[table][v] = vals[v]
3948 if (v not in self._inherit_fields) and (v not in self._columns):
3951 # Try-except added to filter the creation of those records whose filds are readonly.
3952 # Example : any dashboard which has all the fields readonly.(due to Views(database views))
3954 cr.execute("SELECT nextval('"+self._sequence+"')")
3956 raise except_orm(_('UserError'),
3957 _('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.'))
3959 id_new = cr.fetchone()[0]
# Create or update the parent-table rows, collecting the foreign-key
# columns linking this record to each parent into the INSERT lists.
3960 for table in tocreate:
3961 if self._inherits[table] in vals:
3962 del vals[self._inherits[table]]
3964 record_id = tocreate[table].pop('id', None)
3966 if record_id is None or not record_id:
3967 record_id = self.pool.get(table).create(cr, user, tocreate[table], context=context)
3969 self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=context)
3971 upd0 += ',' + self._inherits[table]
3973 upd2.append(record_id)
3975 #Start : Set bool fields to be False if they are not touched(to make search more powerful)
3976 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
3978 for bool_field in bool_fields:
3979 if bool_field not in vals:
3980 vals[bool_field] = False
# Group-based readonly enforcement, mirroring the same phase in write().
3982 for field in vals.copy():
3984 if field in self._columns:
3985 fobj = self._columns[field]
3987 fobj = self._inherit_fields[field][2]
3993 for group in groups:
3994 module = group.split(".")[0]
3995 grp = group.split(".")[1]
# NOTE(review): this query is built via '%s' string interpolation, while
# the equivalent check in write() passes parameters to cr.execute().
# Should be changed to cr.execute(sql, (grp, module, 'res.groups', user))
# to avoid quoting/injection issues and stay consistent.
3996 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
3997 (grp, module, 'res.groups', user))
3998 readonly = cr.fetchall()
3999 if readonly[0][0] >= 1:
4002 elif readonly[0][0] == 0:
# Build the INSERT column/value lists for classic_write columns; other
# fields are deferred to their fields.function ``set`` (upd_todo).
4010 if self._columns[field]._classic_write:
4011 upd0 = upd0 + ',"' + field + '"'
4012 upd1 = upd1 + ',' + self._columns[field]._symbol_set[0]
4013 upd2.append(self._columns[field]._symbol_set[1](vals[field]))
4015 if not isinstance(self._columns[field], fields.related):
4016 upd_todo.append(field)
4017 if field in self._columns \
4018 and hasattr(self._columns[field], 'selection') \
4020 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4021 if self._log_access:
4022 upd0 += ',create_uid,create_date'
4025 cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2))
4026 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4027 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
# Nested-set bookkeeping: open a 2-wide gap after the parent's last child
# (or after the global max when there is no parent) and place the node.
4029 if self._parent_store and not context.get('defer_parent_store_computation'):
4031 self.pool._init_parent[self._name] = True
4033 parent = vals.get(self._parent_name, False)
4035 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4037 result_p = cr.fetchall()
4038 for (pleft,) in result_p:
4043 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4044 pleft_old = cr.fetchone()[0]
4047 cr.execute('select max(parent_right) from '+self._table)
4048 pleft = cr.fetchone()[0] or 0
4049 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4050 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4051 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4053 # default element in context must be remove when call a one2many or many2many
4054 rel_context = context.copy()
4055 for c in context.items():
4056 if c[0].startswith('default_'):
4057 del rel_context[c[0]]
4060 for field in upd_todo:
4061 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4062 self._validate(cr, user, [id_new], context)
# Trigger stored-function recomputation unless explicitly disabled.
4064 if not context.get('no_store_function', False):
4065 result += self._store_get_values(cr, user, [id_new], vals.keys(), context)
4068 for order, object, ids, fields2 in result:
4069 if not (object, ids, fields2) in done:
4070 self.pool.get(object)._store_set_values(cr, user, ids, fields2, context)
4071 done.append((object, ids, fields2))
# Audit-log message and workflow instance creation for the new record.
4073 if self._log_create and not (context and context.get('no_store_function', False)):
4074 message = self._description + \
4076 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4077 "' " + _("created.")
4078 self.log(cr, user, id_new, message, True, context=context)
4079 wf_service = netsvc.LocalService("workflow")
4080 wf_service.trg_create(user, self._name, id_new, cr)
4083 def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
4084 """Fetch records as objects allowing to use dot notation to browse fields and relations
4086 :param cr: database cursor
4087 :param user: current user id
4088 :param select: id or list of ids.
4089 :param context: context arguments, like lang, time zone
4090 :rtype: object or list of objects requested
4093 self._list_class = list_class or browse_record_list
4095 # need to accepts ints and longs because ids coming from a method
4096 # launched by button in the interface have a type long...
4097 if isinstance(select, (int, long)):
4098 return browse_record(cr, uid, select, self, cache, context=context, list_class=self._list_class, fields_process=fields_process)
4099 elif isinstance(select, list):
# A list of ids yields a browse_record_list (or the custom list_class).
4100 return self._list_class([browse_record(cr, uid, id, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) for id in select], context=context)
# Anything else (e.g. False/None) yields an empty browse_null placeholder.
4102 return browse_null()
4104 def _store_get_values(self, cr, uid, ids, fields, context):
4105 """Returns an ordered list of fields.functions to call due to
4106 an update operation on ``fields`` of records with ``ids``,
4107 obtained by calling the 'store' functions of these fields,
4108 as setup by their 'store' attribute.
4110 :return: [(priority, model_name, [record_ids,], [function_fields,])]
4112 if fields is None: fields = []
4113 stored_functions = self.pool._store_function.get(self._name, [])
4115 # use indexed names for the details of the stored_functions:
4116 model_name_, func_field_to_compute_, id_mapping_fnct_, trigger_fields_, priority_ = range(5)
4118 # only keep functions that should be triggered for the ``fields``
4120 to_compute = [f for f in stored_functions \
4121 if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]
4124 for function in to_compute:
4125 # use admin user for accessing objects having rules defined on store fields
4126 target_ids = [id for id in function[id_mapping_fnct_](self, cr, SUPERUSER_ID, ids, context) if id]
4128 # the compound key must consider the priority and model name
4129 key = (function[priority_], function[model_name_])
4130 for target_id in target_ids:
4131 mapping.setdefault(key, {}).setdefault(target_id,set()).add(tuple(function))
4133 # Here mapping looks like:
4134 # { (10, 'model_a') : { target_id1: [ (function_1_tuple, function_2_tuple) ], ... }
4135 # (20, 'model_a') : { target_id2: [ (function_3_tuple, function_4_tuple) ], ... }
4136 # (99, 'model_a') : { target_id1: [ (function_5_tuple, function_6_tuple) ], ... }
4139 # Now we need to generate the batch function calls list
4141 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4143 for ((priority,model), id_map) in mapping.iteritems():
4144 functions_ids_maps = {}
4145 # function_ids_maps =
4146 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4147 for id, functions in id_map.iteritems():
4148 functions_ids_maps.setdefault(tuple(functions), []).append(id)
# Invert the per-id map: target ids sharing the same function set are
# grouped so each (model, fields) batch can be computed in a single call.
4149 for functions, ids in functions_ids_maps.iteritems():
4150 call_map.setdefault((priority,model),[]).append((priority, model, ids,
4151 [f[func_field_to_compute_] for f in functions]))
# Flatten the per-key call lists into one list, ordered by the sorted
# (priority, model) keys (sort call elided in this listing).
4152 ordered_keys = call_map.keys()
4156 result = reduce(operator.add, (call_map[k] for k in ordered_keys))
4159 def _store_set_values(self, cr, uid, ids, fields, context):
4160 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4161 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
# When write timestamps are tracked, collect per-record fields whose
# stored value is newer than the trigger's freshness window (i[5] hours)
# so their recomputation can be skipped below.
4166 if self._log_access:
4167 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4171 field_dict.setdefault(r[0], [])
4172 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4173 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4174 for i in self.pool._store_function.get(self._name, []):
4176 up_write_date = write_date + datetime.timedelta(hours=i[5])
4177 if datetime.datetime.now() < up_write_date:
4179 field_dict[r[0]].append(i[1])
# Group fields sharing the same ``multi`` key so a multi-value function
# is invoked once for the whole group.
4185 if self._columns[f]._multi not in keys:
4186 keys.append(self._columns[f]._multi)
4187 todo.setdefault(self._columns[f]._multi, [])
4188 todo[self._columns[f]._multi].append(f)
4192 # use admin user for accessing objects having rules defined on store fields
4193 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4194 for id, value in result.items():
4196 for f in value.keys():
4197 if f in field_dict[id]:
# many2one results may come back as (id, name) tuples; keep the id only.
4204 if self._columns[v]._type in ('many2one', 'one2one'):
4206 value[v] = value[v][0]
4209 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4210 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4213 cr.execute('update "' + self._table + '" set ' + \
4214 ','.join(upd0) + ' where id = %s', upd1)
# Non-multi path: one ``get`` per field, then one UPDATE per (field, id).
4218 # use admin user for accessing objects having rules defined on store fields
4219 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4220 for r in result.keys():
4222 if r in field_dict.keys():
4223 if f in field_dict[r]:
4225 for id, value in result.items():
4226 if self._columns[f]._type in ('many2one', 'one2one'):
4231 cr.execute('update "' + self._table + '" set ' + \
4232 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
# Deprecated API stub: kept only so legacy callers fail loudly with a
# clear message instead of silently doing nothing.
4238 def perm_write(self, cr, user, ids, fields, context=None):
4239 raise NotImplementedError(_('This method does not exist anymore'))
4241 # TODO: ameliorer avec NULL
4242 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4243 """Computes the WHERE clause needed to implement an OpenERP domain.
4244 :param domain: the domain to compute
4246 :param active_test: whether the default filtering of records with ``active``
4247 field set to ``False`` should be applied.
4248 :return: the query expressing the given domain as provided in domain
4249 :rtype: osv.query.Query
4254 # if the object has a field named 'active', filter out all inactive
4255 # records unless they were explicitely asked for
4256 if 'active' in self._columns and (active_test and context.get('active_test', True)):
# Only inject ('active', '=', 1) when the caller's domain does not
# already constrain 'active' itself.
4258 active_in_args = False
4260 if a[0] == 'active':
4261 active_in_args = True
4262 if not active_in_args:
4263 domain.insert(0, ('active', '=', 1))
4265 domain = [('active', '=', 1)]
# Delegate SQL generation to the expression parser; the alternate branch
# (empty domain, elided here) selects everything from this model's table.
4268 e = expression.expression(cr, user, domain, self, context)
4269 tables = e.get_tables()
4270 where_clause, where_params = e.to_sql()
4271 where_clause = where_clause and [where_clause] or []
4273 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4275 return Query(tables, where_clause, where_params)
4277 def _check_qorder(self, word):
# Validate an ORDER BY spec against the regex_order whitelist (field
# names, optionally quoted, with optional asc/desc) before it can ever
# be concatenated into SQL by the callers.
4278 if not regex_order.match(word):
4279 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4282 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4283 """Add what's missing in ``query`` to implement all appropriate ir.rules
4284 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4286 :param query: the current query object
4288 def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
# Merge one rule's SQL fragments into ``query`` in place, adding any
# tables the rule references that are not in the FROM list yet.
4290 if parent_model and child_object:
4291 # as inherited rules are being applied, we need to add the missing JOIN
4292 # to reach the parent table (if it was not JOINed yet in the query)
4293 child_object._inherits_join_add(child_object, parent_model, query)
4294 query.where_clause += added_clause
4295 query.where_clause_params += added_params
4296 for table in added_tables:
4297 if table not in query.tables:
4298 query.tables.append(table)
4302 # apply main rules on the object
4303 rule_obj = self.pool.get('ir.rule')
4304 apply_rule(*rule_obj.domain_get(cr, uid, self._name, mode, context=context))
4306 # apply ir.rules from the parents (through _inherits)
4307 for inherited_model in self._inherits:
4308 kwargs = dict(parent_model=inherited_model, child_object=self) #workaround for python2.5
4309 apply_rule(*rule_obj.domain_get(cr, uid, inherited_model, mode, context=context), **kwargs)
4311 def _generate_m2o_order_by(self, order_field, query):
4313 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4314 either native m2o fields or function/related fields that are stored, including
4315 intermediate JOINs for inheritance if required.
4317 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4319 if order_field not in self._columns and order_field in self._inherit_fields:
4320 # also add missing joins for reaching the table containing the m2o field
4321 qualified_field = self._inherits_join_calc(order_field, query)
4322 order_field_column = self._inherit_fields[order_field][2]
4324 qualified_field = '"%s"."%s"' % (self._table, order_field)
4325 order_field_column = self._columns[order_field]
4327 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
# A non-stored function m2o field has no DB column to sort on: log and
# bail out (the elided line presumably returns here — confirm).
4328 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4329 logging.getLogger('orm.search').debug("Many2one function/related fields must be stored " \
4330 "to be used as ordering fields! Ignoring sorting for %s.%s",
4331 self._name, order_field)
4334 # figure out the applicable order_by for the m2o
4335 dest_model = self.pool.get(order_field_column._obj)
4336 m2o_order = dest_model._order
4337 if not regex_order.match(m2o_order):
4338 # _order is complex, can't use it here, so we default to _rec_name
4339 m2o_order = dest_model._rec_name
4341 # extract the field names, to be able to qualify them and add desc/asc
4343 for order_part in m2o_order.split(","):
4344 m2o_order_list.append(order_part.strip().split(" ",1)[0].strip())
4345 m2o_order = m2o_order_list
4347 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4348 # as we don't want to exclude results that have NULL values for the m2o
4349 src_table, src_field = qualified_field.replace('"','').split('.', 1)
4350 query.join((src_table, dest_model._table, src_field, 'id'), outer=True)
# Return either one qualified column or a list of them, all on the
# destination model's table.
4351 qualify = lambda field: '"%s"."%s"' % (dest_model._table, field)
4352 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4355 def _generate_order_by(self, order_spec, query):
4357 Attempt to consruct an appropriate ORDER BY clause based on order_spec, which must be
4358 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4360 :raise" except_orm in case order_spec is malformed
# Default to the model's _order; the branch guarding the parsing below
# (presumably ``if order_spec:``) is elided in this listing.
4362 order_by_clause = self._order
4364 order_by_elements = []
4365 self._check_qorder(order_spec)
4366 for order_part in order_spec.split(','):
4367 order_split = order_part.strip().split(' ')
4368 order_field = order_split[0].strip()
4369 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4371 if order_field == 'id':
4372 order_by_clause = '"%s"."%s"' % (self._table, order_field)
4373 elif order_field in self._columns:
4374 order_column = self._columns[order_field]
4375 if order_column._classic_read:
4376 inner_clause = '"%s"."%s"' % (self._table, order_field)
4377 elif order_column._type == 'many2one':
4378 inner_clause = self._generate_m2o_order_by(order_field, query)
4380 continue # ignore non-readable or "non-joinable" fields
4381 elif order_field in self._inherit_fields:
4382 parent_obj = self.pool.get(self._inherit_fields[order_field][3])
4383 order_column = parent_obj._columns[order_field]
4384 if order_column._classic_read:
4385 inner_clause = self._inherits_join_calc(order_field, query)
4386 elif order_column._type == 'many2one':
4387 inner_clause = self._generate_m2o_order_by(order_field, query)
4389 continue # ignore non-readable or "non-joinable" fields
# _generate_m2o_order_by may return several qualified columns; append
# each one with the requested direction.
4391 if isinstance(inner_clause, list):
4392 for clause in inner_clause:
4393 order_by_elements.append("%s %s" % (clause, order_direction))
4395 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4396 if order_by_elements:
4397 order_by_clause = ",".join(order_by_elements)
4399 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4401 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4403 Private implementation of search() method, allowing specifying the uid to use for the access right check.
4404 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4405 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4406 This is ok at the security level because this method is private and not callable through XML-RPC.
4408 :param access_rights_uid: optional user ID to use when checking access rights
4409 (not for ir.rules, this is only for ir.model.access)
4413 self.check_read(cr, access_rights_uid or user)
4415 # For transient models, restrict acces to the current user, except for the super-user
4416 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4417 args = expression.AND(([('create_uid', '=', user)], args or []))
# Assemble the SQL query: domain -> WHERE clause, then record rules
# ('read' mode), then a validated ORDER BY.
4419 query = self._where_calc(cr, user, args, context=context)
4420 self._apply_ir_rules(cr, user, query, 'read', context=context)
4421 order_by = self._generate_order_by(order, query)
4422 from_clause, where_clause, where_clause_params = query.get_sql()
4424 limit_str = limit and ' limit %d' % limit or ''
4425 offset_str = offset and ' offset %d' % offset or ''
4426 where_str = where_clause and (" WHERE %s" % where_clause) or ''
# count=True runs a COUNT query instead of fetching the id list.
4429 cr.execute('SELECT count("%s".id) FROM ' % self._table + from_clause + where_str + limit_str + offset_str, where_clause_params)
4432 cr.execute('SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str, where_clause_params)
4434 return [x[0] for x in res]
    # returns the different values ever entered for one field
    # this is used, for example, in the client when the user hits enter on
    # a char field, to suggest values previously entered for that field
    def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
        """Return the distinct values previously entered for ``field``
        (used by clients for auto-completion suggestions).

        Fields inherited through ``_inherits`` are delegated to the model
        that actually owns the column; otherwise the lookup is delegated to
        the column object's own ``search()``.
        """
        # delegate to the parent model when the field comes from _inherits
        if field in self._inherit_fields:
            return self.pool.get(self._inherit_fields[field][0]).distinct_field_get(cr, uid, field, value, args, offset, limit)
        return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
    def copy_data(self, cr, uid, id, default=None, context=None):
        """
        Copy given record's data with all its fields values

        :param cr: database cursor
        :param user: current user id
        :param id: id of the record to copy
        :param default: field values to override in the original values of the copied record
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: dictionary containing all the field values
        """
        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_data_seen',{})
        if id in seen_map.setdefault(self._name,[]):
        # NOTE(review): the early-return for already-seen ids is not visible
        # in this view — verify against revision history.
        seen_map[self._name].append(id)

        # never carry an explicit 'state' over to the copy unless the caller
        # overrides it: fall back to the model's default for that field
        if 'state' not in default:
            if 'state' in self._defaults:
                if callable(self._defaults['state']):
                    default['state'] = self._defaults['state'](self, cr, uid, context)
                # NOTE(review): an `else:` appears to be missing here — as laid
                # out, the plain default would overwrite the computed one.
                default['state'] = self._defaults['state']

        # read the source record without 'lang' so translatable fields come
        # back in their source language; translations are handled separately
        # by copy_translations()
        context_wo_lang = context.copy()
        if 'lang' in context:
            del context_wo_lang['lang']
        data = self.read(cr, uid, [id,], context=context_wo_lang)
        # NOTE(review): presumably guarded by an emptiness check on `data`
        # (unconditional as shown) — confirm against revision history.
        raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))

        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        fields = self.fields_get(cr, uid, context=context)
        # NOTE(review): the per-field loop header (`for f in fields:`) is not
        # visible here; the code below clearly iterates over field names `f`.
        ftype = fields[f]['type']

        # special handling for audit columns when _log_access is enabled
        if self._log_access and f in LOG_ACCESS_COLUMNS:
            data[f] = default[f]
        elif 'function' in fields[f]:
        elif ftype == 'many2one':
            # read() returns (id, name) pairs for many2one: keep only the id
            data[f] = data[f] and data[f][0]
        elif ftype in ('one2many', 'one2one'):
            rel = self.pool.get(fields[f]['relation'])
            # duplicate following the order of the ids
            # because we'll rely on it later for copying
            # translations in copy_translation()!
            for rel_id in data[f]:
                # the lines are first duplicated using the wrong (old)
                # parent but then are reassigned to the correct one thanks
                # to the (0, 0, ...)
                d = rel.copy_data(cr, uid, rel_id, context=context)
                res.append((0, 0, d))
        elif ftype == 'many2many':
            # relink the copy to the same targets via a (6, 0, ids) command
            data[f] = [(6, 0, data[f])]

        # make sure we don't break the current parent_store structure and
        # force a clean recompute!
        for parent_column in ['parent_left', 'parent_right']:
            data.pop(parent_column, None)
        # Remove _inherits field's from data recursively, missing parents will
        # be created by create() (so that copy() copy everything).
        def remove_ids(inherits_dict):
            for parent_table in inherits_dict:
                del data[inherits_dict[parent_table]]
                remove_ids(self.pool.get(parent_table)._inherits)
        remove_ids(self._inherits)
    def copy_translations(self, cr, uid, old_id, new_id, context=None):
        """Duplicate the ir.translation entries attached to record ``old_id``
        so that they also apply to the freshly copied record ``new_id``,
        recursing into one2one/one2many children.
        """
        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_translations_seen',{})
        if old_id in seen_map.setdefault(self._name,[]):
        # NOTE(review): the early-return for already-seen ids is not visible
        # in this view — verify against revision history.
        seen_map[self._name].append(old_id)

        trans_obj = self.pool.get('ir.translation')
        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        fields = self.fields_get(cr, uid, context=context)

        translation_records = []
        for field_name, field_def in fields.items():
            # we must recursively copy the translations for o2o and o2m
            if field_def['type'] in ('one2one', 'one2many'):
                target_obj = self.pool.get(field_def['relation'])
                old_record, new_record = self.read(cr, uid, [old_id, new_id], [field_name], context=context)
                # here we rely on the order of the ids to match the translations
                # as foreseen in copy_data()
                old_children = sorted(old_record[field_name])
                new_children = sorted(new_record[field_name])
                for (old_child, new_child) in zip(old_children, new_children):
                    target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
            # and for translatable fields we keep them for copy
            elif field_def.get('translate'):
                # the translation key is "<model>,<field>", using the model
                # that actually owns the column
                if field_name in self._columns:
                    trans_name = self._name + "," + field_name
                elif field_name in self._inherit_fields:
                    trans_name = self._inherit_fields[field_name][0] + "," + field_name
                # NOTE(review): the closing `])` of this search() call is not
                # visible in this view.
                trans_ids = trans_obj.search(cr, uid, [
                        ('name', '=', trans_name),
                        ('res_id', '=', old_id)
                translation_records.extend(trans_obj.read(cr, uid, trans_ids, context=context))

        # rebind each duplicated translation to the new record
        for record in translation_records:
            record['res_id'] = new_id
            trans_obj.create(cr, uid, record, context=context)
    def copy(self, cr, uid, id, default=None, context=None):
        """
        Duplicate record with given id updating it with default values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        """
        # work on a private copy of the context: copy_data()/copy_translations()
        # store their recursion-guard maps in it
        context = context.copy()
        data = self.copy_data(cr, uid, id, default, context)
        new_id = self.create(cr, uid, data, context)
        self.copy_translations(cr, uid, id, new_id, context)
        # NOTE(review): the trailing `return new_id` is not visible in this
        # view — verify against revision history.
    def exists(self, cr, uid, ids, context=None):
        """Checks whether the given id or ids exist in this model,
        and return the list of ids that do. This is simple to use for
        a truth test on a browse_record::

        :param ids: id or list of ids to check for existence
        :type ids: int or [int]
        :return: the list of ids that currently exist, out of
                 the given ``ids``
        """
        if type(ids) in (int, long):
        # NOTE(review): the single-id normalization (`ids = [ids]`) is not
        # visible in this view — verify against revision history.
        query = 'SELECT id FROM "%s"' % (self._table)
        cr.execute(query + "WHERE ID IN %s", (tuple(ids),))
        return [x[0] for x in cr.fetchall()]
4626 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4627 warnings.warn("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
4628 self._name, DeprecationWarning, stacklevel=3)
4629 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4630 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4631 return self._check_recursion(cr, uid, ids, context, parent)
    def _check_recursion(self, cr, uid, ids, context=None, parent=None):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **parent** field until a loop
        is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        # NOTE(review): upstream guards this assignment with `if not parent:`;
        # as shown the passed-in parent is always overridden — verify.
        parent = self._parent_name
        # climb the hierarchy one level per pass, querying parent ids in
        # batches of at most cr.IN_MAX ids
        query = 'SELECT distinct "%s" FROM "%s" WHERE id IN %%s' % (parent, self._table)
        for i in range(0, len(ids), cr.IN_MAX):
            sub_ids_parent = ids_parent[i:i+cr.IN_MAX]
            cr.execute(query, (tuple(sub_ids_parent),))
            # keep only non-NULL parents for the next level up
            ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
        ids_parent = ids_parent2
        # NOTE(review): the enclosing `while` loop, this loop's body (the
        # cycle test) and the final return are not visible in this view.
        for i in ids_parent:
    def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID(s) of any database record.

        **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``

        :return: map of ids to the list of their fully qualified External IDs
                 in the form ``module.key``, or an empty list when there's no External
                 ID for a record, e.g.::

                     { 'id': ['module.ext_id', 'module.ext_id_bis'],
                       'id2': [] }
        """
        # External IDs live in ir.model.data rows keyed on (model, res_id)
        ir_model_data = self.pool.get('ir.model.data')
        data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
        data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])

        # can't use dict.fromkeys() as the list would be shared!
        # NOTE(review): the initialization of `result` (one fresh list per id)
        # and the final return are not visible in this view.
        for record in data_results:
            result[record['res_id']].append('%(module)s.%(name)s' % record)
    def get_external_id(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID of any database record, if there
        is one. This method works as a possible implementation
        for a function field, to be able to add it to any
        model object easily, referencing it as ``Model.get_external_id``.

        When multiple External IDs exist for a record, only one
        of them is returned (randomly).

        :return: map of ids to their fully qualified XML ID,
                 defaulting to an empty string when there's none
                 (to be usable as a function field), e.g.::

                     { 'id': 'module.ext_id',
                       'id2': '' }
        """
        results = self._get_xml_ids(cr, uid, ids)
        # NOTE(review): this loop's body (collapsing each list of IDs to a
        # single value) and the final return are not visible in this view.
        for k, v in results.iteritems():
    # backwards compatibility: keep the pre-rename names of the External-ID
    # helpers pointing at the current implementations
    get_xml_id = get_external_id
    _get_xml_ids = _get_external_ids
4715 def is_transient(self):
4716 """ Return whether the model is transient.
4721 return self._transient
4723 def _transient_clean_rows_older_than(self, cr, seconds):
4724 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4725 cr.execute("SELECT id FROM " + self._table + " WHERE"
4726 " COALESCE(write_date, create_date, now())::timestamp <"
4727 " (now() - interval %s)", ("%s seconds" % seconds,))
4728 ids = [x[0] for x in cr.fetchall()]
4729 self.unlink(cr, SUPERUSER_ID, ids)
4731 def _transient_clean_old_rows(self, cr, count):
4732 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4734 "SELECT id, COALESCE(write_date, create_date, now())::timestamp"
4735 " AS t FROM " + self._table +
4736 " ORDER BY t LIMIT %s", (count,))
4737 ids = [x[0] for x in cr.fetchall()]
4738 self.unlink(cr, SUPERUSER_ID, ids)
    def _transient_vacuum(self, cr, uid, force=False):
        """Clean the transient records.

        This unlinks old records from the transient model tables whenever the
        "_transient_max_count" or "_max_age" conditions (if any) are reached.
        Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently (e.g. whenever
        a new record is created).
        """
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        self._transient_check_count += 1
        # skip the actual cleaning on most calls: only act when the call
        # counter reaches a multiple of _transient_check_time (or when forced)
        if (not force) and (self._transient_check_count % self._transient_check_time):
            self._transient_check_count = 0
            # NOTE(review): an early `return` presumably follows here — not
            # visible in this view, verify against revision history.

        # Age-based expiration
        if self._transient_max_hours:
            self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)

        # Count-based expiration
        if self._transient_max_count:
            self._transient_clean_old_rows(cr, self._transient_max_count)
    def resolve_o2m_commands_to_record_dicts(self, cr, uid, field_name, o2m_commands, fields=None, context=None):
        """ Serializes o2m commands into record dictionaries (as if
        all the o2m records came from the database via a read()), and
        returns an iterator over these dictionaries.

        Because o2m commands might be creation commands, not all
        record ids will contain an ``id`` field. Commands matching an
        existing record (UPDATE and LINK_TO) will have an id.

        :param field_name: name of the o2m field matching the commands
        :type field_name: str
        :param o2m_commands: one2many commands to execute on ``field_name``
        :type o2m_commands: list((int|False, int|False, dict|False))
        :param fields: list of fields to read from the database, when applicable
        :type fields: list(str)
        :param context: request context
        :returns: o2m records in a shape similar to that returned by
                  ``read()`` (except records may be missing the ``id``
                  field if they don't exist in db)
        """
        # comodel (target model) of the one2many field
        o2m_model = self._all_columns[field_name].column._obj

        # convert single ids and pairs to tripled commands
        # NOTE(review): the initialization of `commands` (empty list) is not
        # visible in this view.
        for o2m_command in o2m_commands:
            if not isinstance(o2m_command, (list, tuple)):
                # a bare id is treated as a LINK_TO command
                commands.append((4, o2m_command, False))
            elif len(o2m_command) == 1:
                (command,) = o2m_command
                commands.append((command, False, False))
            elif len(o2m_command) == 2:
                command, id = o2m_command
                commands.append((command, id, False))
            # NOTE(review): this append is presumably the else-branch for
            # already-complete triples — the `else:` is not visible here.
            commands.append(o2m_command)

        assert not any(command for command, _, _ in commands if command not in (0, 1, 4)),\
               "Only CREATE, UPDATE and LINK_TO commands are supported in resolver"

        # extract records to read, by id, in a mapping dict
        ids_to_read = [id for (command, id, _) in commands if command in (1, 4)]
        records_by_id = dict(
            (record['id'], record)
            for record in self.pool.get(o2m_model).read(
                cr, uid, ids_to_read, fields=fields, context=context))

        # merge record from db with record provided by command
        # NOTE(review): the initializations of `record_dicts` and `item`, and
        # the final return, are not visible in this view.
        for command, id, record in commands:
            if command in (1, 4): item.update(records_by_id[id])
            if command in (0, 1): item.update(record)
            record_dicts.append(item)
# keep this import here: importing it at the top of the file would cause dependency cycle errors
class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False # True in a TransientModel
class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management,
    all users can create new records, and may only access the
    records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    # BUG FIX: the transient flag was missing, so is_transient() would report
    # False and the vacuum helpers would refuse to run (their asserts require
    # self._transient). Model documents _transient as "True in a TransientModel".
    _transient = True
class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
    inherited by regular models (Models or TransientModels) but not meant to
    be usable on its own, or persisted.

    Technical note: we don't want to make AbstractModel the super-class of
    Model or BaseModel because it would not make sense to put the main
    definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
    _auto = False # don't create any database backend for AbstractModels
    _register = False # not visible in ORM registry, meant to be python-inherited only
4864 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: