diff --git a/openerp/osv/orm.py b/openerp/osv/orm.py
index d8a2cec..54538ad 100644
--- a/openerp/osv/orm.py
+++ b/openerp/osv/orm.py
@@ -52,7 +52,6 @@ import re
 import simplejson
 import time
 import types
-import warnings
 from lxml import etree
 
 import fields
@@ -65,6 +64,9 @@ from openerp.tools.translate import _
 from openerp import SUPERUSER_ID
 from query import Query
 
+_logger = logging.getLogger(__name__)
+_schema = logging.getLogger(__name__ + '.schema')
+
 # List of etree._Element subclasses that we choose to ignore when parsing XML.
 from openerp.tools import SKIPPED_ELEMENT_TYPES
 
@@ -216,8 +218,7 @@ def check_object_name(name):
 def raise_on_invalid_object_name(name):
     if not check_object_name(name):
         msg = "The _name attribute %s is not valid." % name
-        logger = netsvc.Logger()
-        logger.notifyChannel('orm', netsvc.LOG_ERROR, msg)
+        _logger.error(msg)
         raise except_orm('ValueError', msg)
 
 POSTGRES_CONFDELTYPES = {
@@ -306,29 +307,30 @@ class browse_record(object):
         user_rec = uobj.browse(cr, uid, 104)
         name = user_rec.name
     """
-    logger = netsvc.Logger()
 
-    def __init__(self, cr, uid, id, table, cache, context=None, list_class=None, fields_process=None):
+    def __init__(self, cr, uid, id, table, cache, context=None,
+                 list_class=browse_record_list, fields_process=None):
         """
-        @param cache a dictionary of model->field->data to be shared accross browse
-            objects, thus reducing the SQL read()s . It can speed up things a lot,
-            but also be disastrous if not discarded after write()/unlink() operations
-        @param table the object (inherited from orm)
-        @param context dictionary with an optional context
+        :param table: the browsed object (inherited from orm)
+        :param dict cache: a dictionary of model->field->data to be shared
+                           across browse objects, thus reducing the SQL
+                           read()s. It can speed up things a lot, but also be
+                           disastrous if not discarded after write()/unlink()
+                           operations
+        :param dict context: dictionary with an optional context
        """
        if fields_process is None:
            fields_process = {}
        if context is None:
            context = {}
-        self._list_class = list_class or browse_record_list
+        self._list_class = list_class
        self._cr = cr
        self._uid = uid
        self._id = id
        self._table = table # deprecated, use _model!
        self._model = table
        self._table_name = self._table._name
-        self.__logger = logging.getLogger(
-            'osv.browse_record.' + self._table_name)
+        self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
        self._context = context
        self._fields_process = fields_process
 
@@ -369,7 +371,7 @@ class browse_record(object):
                 return attr
             else:
                 error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
-                self.logger.notifyChannel("browse_record", netsvc.LOG_WARNING, error_msg)
+                self.__logger.warning(error_msg)
                 raise KeyError(error_msg)
 
         # if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
@@ -405,7 +407,7 @@
                 if not field_values:
                     # Where did those ids come from? Perhaps old entries in ir_model_dat?
-                    self.__logger.warn("No field_values found for ids %s in %s", ids, self)
+                    _logger.warning("No field_values found for ids %s in %s", ids, self)
                     raise KeyError('Field %s not found in %s'%(name, self))
                 # create browse records for 'remote' objects
                 for result_line in field_values:
@@ -464,10 +466,8 @@ class browse_record(object):
 
         if not name in self._data[self._id]:
             # How did this happen? Could be a missing model due to custom fields used too soon, see above.
-            self.logger.notifyChannel("browse_record", netsvc.LOG_ERROR,
-                    "Fields to fetch: %s, Field values: %s"%(field_names, field_values))
-            self.logger.notifyChannel("browse_record", netsvc.LOG_ERROR,
-                    "Cached: %s, Table: %s"%(self._data[self._id], self._table))
+            self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
+            self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
             raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
         return self._data[self._id][name]
 
@@ -584,7 +584,7 @@ def get_pg_type(f, type_override=None):
         else:
             pg_type = get_pg_type(f, getattr(fields, f._type))
     else:
-        logging.getLogger('orm').warn('%s type not supported!', field_type)
+        _logger.warning('%s type not supported!', field_type)
         pg_type = None
 
     return pg_type
@@ -608,7 +608,16 @@ class MetaModel(type):
             super(MetaModel, self).__init__(name, bases, attrs)
             return
 
-        module_name = self.__module__.split('.')[0]
+        # The (OpenERP) module name can be in the `openerp.addons` namespace
+        # or not. For instance module `sale` can be imported as
+        # `openerp.addons.sale` (the good way) or `sale` (for backward
+        # compatibility).
+        module_parts = self.__module__.split('.')
+        if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
+            module_parts[1] == 'addons':
+            module_name = self.__module__.split('.')[2]
+        else:
+            module_name = self.__module__.split('.')[0]
         if not hasattr(self, '_module'):
             self._module = module_name
 
@@ -693,8 +702,6 @@ class BaseModel(object):
     _log_create = False
     _sql_constraints = []
     _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
-    __logger = logging.getLogger('orm')
-    __schema = logging.getLogger('orm.schema')
 
     CONCURRENCY_CHECK_FIELD = '__last_update'
 
@@ -743,7 +750,7 @@ class BaseModel(object):
             name_id = 'model_'+self._name.replace('.', '_')
             cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
             if not cr.rowcount:
-                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
+                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                     (name_id, context['module'], 'ir.model', model_id)
                 )
 
@@ -809,7 +816,7 @@ class BaseModel(object):
                     cr.execute("select name from ir_model_data where name=%s", (name1,))
                     if cr.fetchone():
                         name1 = name1 + "_" + str(id)
-                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
+                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                         (name1, context['module'], 'ir.model.fields', id)
                     )
             else:
@@ -865,11 +872,11 @@ class BaseModel(object):
         for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
             parent_model = pool.get(parent_name)
-            if not getattr(cls, '_original_module', None) and name == parent_model._name:
-                cls._original_module = parent_model._original_module
             if not parent_model:
                 raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                     'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
+            if not getattr(cls, '_original_module', None) and name == parent_model._name:
+                cls._original_module = parent_model._original_module
             parent_class = parent_model.__class__
             nattr = {}
             for s in attributes:
@@ -879,6 +886,11 @@ class BaseModel(object):
                     for c in new.keys():
                         if new[c].manual:
                             del new[c]
+                    # Duplicate float fields because they have a .digits
+                    # cache (which must be per-registry, not server-wide).
+                    for c in new.keys():
+                        if new[c]._type == 'float':
+                            new[c] = copy.copy(new[c])
                 if hasattr(new, 'update'):
                     new.update(cls.__dict__.get(s, {}))
                 elif s=='_constraints':
@@ -948,8 +960,7 @@ class BaseModel(object):
             name = type(self).__name__.split('.')[0]
             msg = "The class %s has to have a _name attribute" % name
 
-            logger = netsvc.Logger()
-            logger.notifyChannel('orm', netsvc.LOG_ERROR, msg)
+            _logger.error(msg)
             raise except_orm('ValueError', msg)
 
         if not self._description:
@@ -1073,6 +1084,31 @@ class BaseModel(object):
             else:
                 return False
 
+        def _get_xml_id(self, cr, uid, r):
+            model_data = self.pool.get('ir.model.data')
+            data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
+            if len(data_ids):
+                d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
+                if d['module']:
+                    r = '%s.%s' % (d['module'], d['name'])
+                else:
+                    r = d['name']
+            else:
+                postfix = 0
+                while True:
+                    n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
+                    if not model_data.search(cr, uid, [('name', '=', n)]):
+                        break
+                    postfix += 1
+                model_data.create(cr, SUPERUSER_ID, {
+                    'name': n,
+                    'model': self._name,
+                    'res_id': r['id'],
+                    'module': '__export__',
+                })
+                r = '__export__.'+n
+            return r
+
         lines = []
         data = map(lambda x: '', range(len(fields)))
         done = []
@@ -1082,35 +1118,14 @@ class BaseModel(object):
                 r = row
                 i = 0
                 while i < len(f):
+                    cols = False
                    if f[i] == '.id':
                        r = r['id']
                    elif f[i] == 'id':
-                        model_data = self.pool.get('ir.model.data')
-                        data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
-                        if len(data_ids):
-                            d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
-                            if d['module']:
-                                r = '%s.%s' % (d['module'], d['name'])
-                            else:
-                                r = d['name']
-                        else:
-                            postfix = 0
-                            while True:
-                                n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
-                                if not model_data.search(cr, uid, [('name', '=', n)]):
-                                    break
-                                postfix += 1
-                            model_data.create(cr, uid, {
-                                'name': n,
-                                'model': self._name,
-                                'res_id': r['id'],
-                                'module': '__export__',
-                            })
-                            r = n
+                        r = _get_xml_id(self, cr, uid, r)
                    else:
                        r = r[f[i]]
                        # To display external name of selection field when its exported
-                        cols = False
                        if f[i] in self._columns.keys():
                            cols = self._columns[f[i]]
                        elif f[i] in self._inherit_fields.keys():
@@ -1135,8 +1150,12 @@ class BaseModel(object):
                            if [x for x in fields2 if x]:
                                break
                        done.append(fields2)
+                        if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
+                            data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
+                            break
+
                        for row2 in r:
-                            lines2 = self.__export_row(cr, uid, row2, fields2,
+                            lines2 = row2._model.__export_row(cr, uid, row2, fields2,
                                    context)
                            if first:
                                for fpos2 in range(len(fields)):
@@ -1330,7 +1349,7 @@ class BaseModel(object):
                newfd = relation_obj.fields_get( cr, uid, context=context )
                pos = position
 
-                res = many_ids(line[i], relation, current_module, mode)
+                res = []
 
                first = 0
                while pos < len(datas):
@@ -1342,9 +1361,6 @@ class BaseModel(object):
                        warning += w2
                    first += 1
 
-                    if data_res_id2:
-                        res.append((4, data_res_id2))
-
                    if (not newrow) or not reduce(lambda x, y: x or y, newrow.values(), 0):
                        break
 
@@ -1370,7 +1386,7 @@ class BaseModel(object):
                            res = key
                            break
                    if line[i] and not res:
-                        logging.getLogger('orm.import').warn(
+                        _logger.warning(
                            _("key '%s' not found in selection field '%s'"),
                            tools.ustr(line[i]), tools.ustr(field_name))
                        warning.append(_("Key/value '%s' not found in selection field '%s'") % (
@@ -1712,6 +1728,10 @@ class BaseModel(object):
                trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('sum'))
                if trans:
                    node.set('sum', trans)
+            if node.get('avg'):
+                trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('avg'))
+                if trans:
+                    node.set('avg', trans)
            if node.get('help'):
                trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('help'))
                if trans:
@@ -1789,7 +1809,7 @@ class BaseModel(object):
            res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None))
            msg = "\n * ".join([r[0] for r in res])
            msg += "\n\nEither you wrongly customized this view, or some modules bringing those views are not compatible with your current data model"
-            netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
+            _logger.error(msg)
            raise except_orm('View error', msg)
        return arch, fields
 
@@ -2373,6 +2393,7 @@ class BaseModel(object):
        try:
            getattr(self, '_ormcache')
            self._ormcache = {}
+            self.pool._any_cache_cleared = True
        except AttributeError:
            pass
 
@@ -2611,8 +2632,7 @@ class BaseModel(object):
    def _parent_store_compute(self, cr):
        if not self._parent_store:
            return
-        logger = netsvc.Logger()
-        logger.notifyChannel('data', netsvc.LOG_INFO, 'Computing parent left and right for table %s...' % (self._table, ))
+        _logger.info('Computing parent left and right for table %s...', self._table)
        def browse_rec(root, pos=0):
            # TODO: set order
            where = self._parent_name+'='+str(root)
@@ -2636,8 +2656,7 @@ class BaseModel(object):
        return True
 
    def _update_store(self, cr, f, k):
-        logger = netsvc.Logger()
-        logger.notifyChannel('data', netsvc.LOG_INFO, "storing computed values of fields.function '%s'" % (k,))
+        _logger.info("storing computed values of fields.function '%s'", k)
        ss = self._columns[k]._symbol_set
        update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
        cr.execute('select id from '+self._table)
@@ -2676,7 +2695,7 @@ class BaseModel(object):
        elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
            return
        raise except_orm(_('ValidateError'),
-                        _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
+                         _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
 
    def _check_removed_columns(self, cr, log=False):
        # iterate on the database columns to drop the NOT NULL constraints
@@ -2693,12 +2712,12 @@ class BaseModel(object):
 
        for column in cr.dictfetchall():
            if log:
-                self.__logger.debug("column %s is in the table %s but not in the corresponding object %s",
-                                    column['attname'], self._table, self._name)
+                _logger.debug("column %s is in the table %s but not in the corresponding object %s",
+                              column['attname'], self._table, self._name)
            if column['attnotnull']:
                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
-                self.__schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
-                                    self._table, column['attname'])
+                _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
+                              self._table, column['attname'])
 
    # checked version: for direct m2o starting from `self`
    def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
@@ -2710,14 +2729,58 @@
        # So unless stated otherwise we default them to ondelete=cascade.
        ondelete = ondelete or 'cascade'
        self._foreign_keys.append((self._table, source_field, dest_model._table, ondelete or 'set null'))
-        self.__schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
-            self._table, source_field, dest_model._table, ondelete)
+        _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
+                      self._table, source_field, dest_model._table, ondelete)
 
    # unchecked version: for custom cases, such as m2m relationships
    def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
        self._foreign_keys.append((source_table, source_field, dest_model._table, ondelete or 'set null'))
-        self.__schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
-            source_table, source_field, dest_model._table, ondelete)
+        _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
+                      source_table, source_field, dest_model._table, ondelete)
+
+    def _drop_constraint(self, cr, source_table, constraint_name):
+        cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
+
+    def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
+        # Find FK constraint(s) currently established for the m2o field,
+        # and see whether they are stale or not
+        cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
+                             cl2.relname as foreign_table
+                      FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
+                           pg_attribute as att1, pg_attribute as att2
+                      WHERE con.conrelid = cl1.oid
+                        AND cl1.relname = %s
+                        AND con.confrelid = cl2.oid
+                        AND array_lower(con.conkey, 1) = 1
+                        AND con.conkey[1] = att1.attnum
+                        AND att1.attrelid = cl1.oid
+                        AND att1.attname = %s
+                        AND array_lower(con.confkey, 1) = 1
+                        AND con.confkey[1] = att2.attnum
+                        AND att2.attrelid = cl2.oid
+                        AND att2.attname = %s
+                        AND con.contype = 'f'""", (source_table, source_field, 'id'))
+        constraints = cr.dictfetchall()
+        if constraints:
+            if len(constraints) == 1:
+                # Is it the right constraint?
+                cons, = constraints
+                if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
+                    or cons['foreign_table'] != dest_model._table:
+                    _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
+                                  source_table, cons['constraint_name'])
+                    self._drop_constraint(cr, source_table, cons['constraint_name'])
+                    self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
+                # else it's all good, nothing to do!
+            else:
+                # Multiple FKs found for the same field, drop them all, and re-create
+                for cons in constraints:
+                    _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
+                                  source_table, cons['constraint_name'])
+                    self._drop_constraint(cr, source_table, cons['constraint_name'])
+                self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
+
+
    def _auto_init(self, cr, context=None):
        """
 
@@ -2791,8 +2854,8 @@ class BaseModel(object):
                            cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
                            res['attname'] = k
                            column_data[k] = res
-                            self.__schema.debug("Table '%s': renamed column '%s' to '%s'",
-                                                self._table, f.oldname, k)
+                            _schema.debug("Table '%s': renamed column '%s' to '%s'",
+                                          self._table, f.oldname, k)
 
                        # The field already exists in database. Possibly
                        # change its type, rename it, drop it or change its
@@ -2803,12 +2866,12 @@ class BaseModel(object):
                        f_pg_notnull = res['attnotnull']
                        if isinstance(f, fields.function) and not f.store and\
                                not getattr(f, 'nodrop', False):
-                            self.__logger.info('column %s (%s) in table %s removed: converted to a function !\n',
-                                               k, f.string, self._table)
+                            _logger.info('column %s (%s) in table %s removed: converted to a function !\n',
+                                         k, f.string, self._table)
                            cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
                            cr.commit()
-                            self.__schema.debug("Table '%s': dropped column '%s' with cascade",
-                                                self._table, k)
+                            _schema.debug("Table '%s': dropped column '%s' with cascade",
+                                          self._table, k)
                            f_obj_type = None
                        else:
                            f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
@@ -2830,7 +2893,7 @@ class BaseModel(object):
                                cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
                                cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
                                cr.commit()
-                                self.__schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
+                                _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
                                    self._table, k, f_pg_size, f.size)
                            for c in casts:
                                if (f_pg_type==c[0]) and (f._type==c[1]):
@@ -2841,7 +2904,7 @@ class BaseModel(object):
                                    cr.execute(('UPDATE "%s" SET "%s"=temp_change_size'+c[3]) % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
                                    cr.commit()
-                                    self.__schema.debug("Table '%s': column '%s' changed type from %s to %s",
+                                    _schema.debug("Table '%s': column '%s' changed type from %s to %s",
                                        self._table, k, c[0], c[1])
                                    break
 
@@ -2862,7 +2925,7 @@ class BaseModel(object):
                                cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
                                cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                                cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
-                                self.__schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
+                                _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
                                    self._table, k, f_pg_type, f._type, newname)
 
                            # if the field is required and hasn't got a NOT NULL constraint
@@ -2883,19 +2946,19 @@ class BaseModel(object):
                                try:
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                                    cr.commit()
-                                    self.__schema.debug("Table '%s': column '%s': added NOT NULL constraint",
-                                                        self._table, k)
+                                    _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
+                                                  self._table, k)
                                except Exception:
                                    msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                                        "If you want to have it, you should update the records and execute manually:\n"\
                                        "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
-                                    self.__schema.warn(msg, self._table, k, self._table, k)
+                                    _schema.warning(msg, self._table, k, self._table, k)
                                    cr.commit()
                            elif not f.required and f_pg_notnull == 1:
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                                cr.commit()
-                                self.__schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
-                                                    self._table, k)
+                                _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
+                                              self._table, k)
                            # Verify index
                            indexname = '%s_%s_index' % (self._table, k)
                            cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
@@ -2909,40 +2972,17 @@ class BaseModel(object):
                                    "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
                                    " because there is a length limit for indexable btree values!\n"\
                                    "Use a search view instead if you simply want to make the field searchable."
-                                self.__schema.warn(msg, self._table, k, f._type)
+                                _schema.warning(msg, self._table, k, f._type)
                            if res2 and not f.select:
                                cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
                                cr.commit()
                                msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
-                                self.__schema.debug(msg, self._table, k, f._type)
+                                _schema.debug(msg, self._table, k, f._type)
 
                            if isinstance(f, fields.many2one):
                                dest_model = self.pool.get(f._obj)
-                                ref = dest_model._table
-                                if ref != 'ir_actions':
-                                    cr.execute('SELECT confdeltype, conname FROM pg_constraint as con, pg_class as cl1, pg_class as cl2, '
-                                                'pg_attribute as att1, pg_attribute as att2 '
-                                                'WHERE con.conrelid = cl1.oid '
-                                                'AND cl1.relname = %s '
-                                                'AND con.confrelid = cl2.oid '
-                                                'AND cl2.relname = %s '
-                                                'AND array_lower(con.conkey, 1) = 1 '
-                                                'AND con.conkey[1] = att1.attnum '
-                                                'AND att1.attrelid = cl1.oid '
-                                                'AND att1.attname = %s '
-                                                'AND array_lower(con.confkey, 1) = 1 '
-                                                'AND con.confkey[1] = att2.attnum '
-                                                'AND att2.attrelid = cl2.oid '
-                                                'AND att2.attname = %s '
-                                                "AND con.contype = 'f'", (self._table, ref, k, 'id'))
-                                    res2 = cr.dictfetchall()
-                                    if res2:
-                                        if res2[0]['confdeltype'] != POSTGRES_CONFDELTYPES.get((f.ondelete or 'set null').upper(), 'a'):
-                                            cr.execute('ALTER TABLE "' + self._table + '" DROP CONSTRAINT "' + res2[0]['conname'] + '"')
-                                            self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
-                                            cr.commit()
-                                            self.__schema.debug("Table '%s': column '%s': XXX",
-                                                                self._table, k)
+                                if dest_model._table != 'ir_actions':
+                                    self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
 
                    # The field doesn't exist in database. Create it if necessary.
                    else:
@@ -2950,7 +2990,7 @@ class BaseModel(object):
                            # add the missing field
                            cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                            cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
-                            self.__schema.debug("Table '%s': added column '%s' with definition=%s",
+                            _schema.debug("Table '%s': added column '%s' with definition=%s",
                                self._table, k, get_pg_type(f)[1])
 
                            # initialize it
@@ -2964,7 +3004,7 @@ class BaseModel(object):
                                query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
                                cr.execute(query, (ss[1](default),))
                                cr.commit()
-                                netsvc.Logger().notifyChannel('data', netsvc.LOG_DEBUG, "Table '%s': setting default value of new column %s" % (self._table, k))
+                                _logger.debug("Table '%s': setting default value of new column %s", self._table, k)
 
                            # remember the functions to call for the stored fields
                            if isinstance(f, fields.function):
@@ -2988,14 +3028,14 @@ class BaseModel(object):
                                try:
                                    cr.commit()
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
-                                    self.__schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
+                                    _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
                                        self._table, k)
                                except Exception:
                                    msg = "WARNING: unable to set column %s of table %s not null !\n"\
                                        "Try to re-run: openerp-server --update=module\n"\
                                        "If it doesn't work, update records and execute manually:\n"\
                                        "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
-                                    self.__logger.warn(msg, k, self._table, self._table, k)
+                                    _logger.warning(msg, k, self._table, self._table, k)
                                    cr.commit()
 
        else:
@@ -3032,7 +3072,7 @@ class BaseModel(object):
    def _create_table(self, cr):
        cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
        cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
-        self.__schema.debug("Table '%s': created", self._table)
+        _schema.debug("Table '%s': created", self._table)
 
    def _parent_columns_exist(self, cr):
 
@@ -3047,24 +3087,24 @@ class BaseModel(object):
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
        if 'parent_left' not in self._columns:
-            self.__logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
-                                self._table)
-            self.__schema.debug("Table '%s': added column '%s' with definition=%s",
-                                self._table, 'parent_left', 'INTEGER')
+            _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
+                          self._table)
+            _schema.debug("Table '%s': added column '%s' with definition=%s",
+                          self._table, 'parent_left', 'INTEGER')
        elif not self._columns['parent_left'].select:
-            self.__logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
-                                self._table)
+            _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
+                          self._table)
        if 'parent_right' not in self._columns:
-            self.__logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
-                                self._table)
-            self.__schema.debug("Table '%s': added column '%s' with definition=%s",
-                                self._table, 'parent_right', 'INTEGER')
+            _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
+                          self._table)
+            _schema.debug("Table '%s': added column '%s' with definition=%s",
+                          self._table, 'parent_right', 'INTEGER')
        elif not self._columns['parent_right'].select:
-            self.__logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
-                                self._table)
+            _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
+                          self._table)
        if self._columns[self._parent_name].ondelete != 'cascade':
-            self.__logger.error("The column %s on object %s must be set as ondelete='cascade'",
-                                self._parent_name, self._name)
+            _logger.error("The column %s on object %s must be set as ondelete='cascade'",
+                          self._parent_name, self._name)
        cr.commit()
 
@@ -3079,8 +3119,8 @@ class BaseModel(object):
        if not cr.rowcount:
            cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
            cr.commit()
-            self.__schema.debug("Table '%s': added column '%s' with definition=%s",
-                                self._table, field, field_def)
+            _schema.debug("Table '%s': added column '%s' with definition=%s",
+                          self._table, field, field_def)
 
    def _select_column_data(self, cr):
 
@@ -3128,7 +3168,7 @@ class BaseModel(object):
            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
            cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
            cr.commit()
-            self.__schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
+            _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
 
 
    def _add_sql_constraints(self, cr):
@@ -3138,6 +3178,9 @@ class BaseModel(object):
           _sql_constraints.
 
        """
+        def unify_cons_text(txt):
+            return txt.lower().replace(', ',',').replace(' (','(')
+
        for (key, con, _) in self._sql_constraints:
            conname = '%s_%s' % (self._table, key)
 
@@ -3167,7 +3210,7 @@ class BaseModel(object):
                # constraint does not exists:
                sql_actions['add']['execute'] = True
                sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
-            elif con.lower() not in [item['condef'].lower() for item in existing_constraints]:
+            elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
                # constraint exists but its definition has changed:
                sql_actions['drop']['execute'] = True
                sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
@@ -3181,9 +3224,9 @@ class BaseModel(object):
                try:
                    cr.execute(sql_action['query'])
                    cr.commit()
-                    self.__schema.debug(sql_action['msg_ok'])
+                    _schema.debug(sql_action['msg_ok'])
                except:
-                    self.__schema.warn(sql_action['msg_err'])
+                    _schema.warning(sql_action['msg_err'])
                    cr.rollback()
 
@@ -3240,11 +3283,11 @@ class BaseModel(object):
    def _inherits_check(self):
        for table, field_name in self._inherits.items():
            if field_name not in self._columns:
-                logging.getLogger('init').info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.' % (field_name, self._name))
+                _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
                self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
                                                             required=True, ondelete="cascade")
            elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() != "cascade":
-                logging.getLogger('init').warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.' % (field_name, self._name))
+                _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.', field_name, self._name)
                self._columns[field_name].required = True
                self._columns[field_name].ondelete = "cascade"
 
@@ -3402,8 +3445,8 @@ class BaseModel(object):
                return "date_trunc('second', %s) as %s" % (f_qual, f)
            if f == self.CONCURRENCY_CHECK_FIELD:
                if self._log_access:
-                    return "COALESCE(%s.write_date, %s.create_date, now())::timestamp AS %s" % (self._table, self._table, f,)
-                return "now()::timestamp AS %s" % (f,)
+                    return "COALESCE(%s.write_date, %s.create_date, (now() at time zone 'UTC'))::timestamp AS %s" % (self._table, self._table, f,)
+                return "(now() at time zone 'UTC')::timestamp AS %s" % (f,)
            if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
                return 'length(%s) as "%s"' % (f_qual, f)
            return f_qual
@@ -3584,7 +3627,7 @@ class BaseModel(object):
            return
        if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
            return
-        check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, now())::timestamp)"
+        check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
        for sub_ids in cr.split_for_in_conditions(ids):
            ids_to_check = []
            for id in sub_ids:
@@ -3680,13 +3723,19 @@ class BaseModel(object):
 
        self.check_unlink(cr, uid)
 
-        properties = self.pool.get('ir.property')
+        ir_property = self.pool.get('ir.property')
+
+        # Check if the records are used as default properties.
        domain = [('res_id', '=', False),
                  ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
                 ]
-        if properties.search(cr, uid, domain, context=context):
+        if ir_property.search(cr, uid, domain, context=context):
            raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
 
+        # Delete the records' properties.
+        property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
+        ir_property.unlink(cr, uid, property_ids, context=context)
+
        wf_service = netsvc.LocalService("workflow")
        for oid in ids:
            wf_service.trg_delete(uid, self._name, oid, cr)
@@ -3799,10 +3848,6 @@ class BaseModel(object):
                    if readonly[0][0] >= 1:
                        edit = True
                        break
-                    elif readonly[0][0] == 0:
-                        edit = False
-                    else:
-                        edit = False
 
                if not edit:
                    vals.pop(field)
@@ -3866,7 +3911,7 @@ class BaseModel(object):
 
        if self._log_access:
            upd0.append('write_uid=%s')
-            upd0.append('write_date=now()')
+            upd0.append("write_date=(now() at time zone 'UTC')")
            upd1.append(user)
 
        if len(upd0):
@@ -3921,7 +3966,7 @@ class BaseModel(object):
                self.pool.get(table).write(cr, user, nids, v, context)
 
        if unknown_fields:
-            self.__logger.warn(
+            _logger.warning(
                'No such field(s) in model %s: %s.',
                self._name, ', '.join(unknown_fields))
        self._validate(cr, user, ids, context)
@@ -4057,7 +4102,7 @@ class BaseModel(object):
                del vals[v]
                unknown_fields.append(v)
        if unknown_fields:
-            self.__logger.warn(
+            _logger.warning(
                'No such field(s) in model %s: %s.',
                self._name, ', '.join(unknown_fields))
 
@@ -4075,11 +4120,16 @@ class BaseModel(object):
                del vals[self._inherits[table]]
 
            record_id = tocreate[table].pop('id', None)
-
+
+            # When linking/creating parent records, force context without 'no_store_function' key that
+            # defers stored functions computing, as these won't be computed in batch at the end of create().
+            parent_context = dict(context)
+            parent_context.pop('no_store_function', None)
+
            if record_id is None or not record_id:
-                record_id = self.pool.get(table).create(cr, user, tocreate[table], context=context)
+                record_id = self.pool.get(table).create(cr, user, tocreate[table], context=parent_context)
            else:
-                self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=context)
+                self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=parent_context)
 
            upd0 += ',' + self._inherits[table]
            upd1 += ',%s'
@@ -4133,7 +4183,7 @@ class BaseModel(object):
                self._check_selection_field_value(cr, user, field, vals[field], context=context)
        if self._log_access:
            upd0 += ',create_uid,create_date'
-            upd1 += ',%s,now()'
+            upd1 += ",%s,(now() at time zone 'UTC')"
            upd2.append(user)
        cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2))
        self.check_access_rule(cr, user, [id_new], 'create', context=context)
@@ -4197,7 +4247,7 @@ class BaseModel(object):
        """Fetch records as objects allowing to use dot notation to browse fields and relations
 
        :param cr: database cursor
-        :param user: current user id
+        :param uid: current user id
        :param select: id or list of ids.
        :param context: context arguments, like lang, time zone
        :rtype: object or list of objects requested
@@ -4366,13 +4416,11 @@ class BaseModel(object):
        domain = domain[:]
        # if the object has a field named 'active', filter out all inactive
        # records unless they were explicitely asked for
-        if 'active' in self._columns and (active_test and context.get('active_test', True)):
+        if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
            if domain:
-                active_in_args = False
-                for a in domain:
-                    if a[0] == 'active':
-                        active_in_args = True
-                if not active_in_args:
+                # the item[0] trick below works for domain items and '&'/'|'/'!'
+                # operators too
+                if not any(item[0] == 'active' for item in domain):
                    domain.insert(0, ('active', '=', 1))
            else:
                domain = [('active', '=', 1)]
@@ -4439,9 +4487,9 @@ class BaseModel(object):
 
        assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
        if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
-            logging.getLogger('orm.search').debug("Many2one function/related fields must be stored " \
-                                                  "to be used as ordering fields! Ignoring sorting for %s.%s",
-                                                  self._name, order_field)
+            _logger.debug("Many2one function/related fields must be stored " \
+                          "to be used as ordering fields! Ignoring sorting for %s.%s",
+                          self._name, order_field)
            return
 
        # figure out the applicable order_by for the m2o
@@ -4737,8 +4785,8 @@ class BaseModel(object):
        return [x[0] for x in cr.fetchall()]
 
    def check_recursion(self, cr, uid, ids, context=None, parent=None):
-        warnings.warn("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
-                      self._name, DeprecationWarning, stacklevel=3)
+        _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
+                        self._name)
        assert parent is None or parent in self._columns or parent in self._inherit_fields,\
                    "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
        return self._check_recursion(cr, uid, ids, context, parent)
@@ -4836,15 +4884,15 @@ class BaseModel(object):
    def _transient_clean_rows_older_than(self, cr, seconds):
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        cr.execute("SELECT id FROM " + self._table + " WHERE"
-            " COALESCE(write_date, create_date, now())::timestamp <"
-            " (now() - interval %s)", ("%s seconds" % seconds,))
+            " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp <"
+            " ((now() at time zone 'UTC') - interval %s)", ("%s seconds" % seconds,))
        ids = [x[0] for x in cr.fetchall()]
        self.unlink(cr, SUPERUSER_ID, ids)
 
    def _transient_clean_old_rows(self, cr, count):
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        cr.execute(
-            "SELECT id, COALESCE(write_date, create_date, now())::timestamp"
+            "SELECT id, COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
            " AS t FROM " + self._table +
            " ORDER BY t LIMIT %s", (count,))
        ids = [x[0] for x in cr.fetchall()]