import re
import simplejson
import time
-import traceback
import types
-import warnings
from lxml import etree
import fields
from openerp import SUPERUSER_ID
from query import Query
+_logger = logging.getLogger(__name__)
+_schema = logging.getLogger(__name__ + '.schema')
+
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from openerp.tools import SKIPPED_ELEMENT_TYPES
def raise_on_invalid_object_name(name):
if not check_object_name(name):
msg = "The _name attribute %s is not valid." % name
- logger = netsvc.Logger()
- logger.notifyChannel('orm', netsvc.LOG_ERROR, msg)
+ _logger.error(msg)
raise except_orm('ValueError', msg)
POSTGRES_CONFDELTYPES = {
'SET DEFAULT': 'd',
}
-def last_day_of_current_month():
- today = datetime.date.today()
- last_day = str(calendar.monthrange(today.year, today.month)[1])
- return time.strftime('%Y-%m-' + last_day)
-
def intersect(la, lb):
return filter(lambda x: x in lb, la)
user_rec = uobj.browse(cr, uid, 104)
name = user_rec.name
"""
- logger = netsvc.Logger()
- def __init__(self, cr, uid, id, table, cache, context=None, list_class=None, fields_process=None):
+ def __init__(self, cr, uid, id, table, cache, context=None,
+ list_class=browse_record_list, fields_process=None):
"""
- @param cache a dictionary of model->field->data to be shared accross browse
- objects, thus reducing the SQL read()s . It can speed up things a lot,
- but also be disastrous if not discarded after write()/unlink() operations
- @param table the object (inherited from orm)
- @param context dictionary with an optional context
+ :param table: the browsed object (inherited from orm)
+ :param dict cache: a dictionary of model->field->data to be shared
+ across browse objects, thus reducing the SQL
+ read()s. It can speed up things a lot, but also be
+ disastrous if not discarded after write()/unlink()
+ operations
+ :param dict context: dictionary with an optional context
"""
if fields_process is None:
fields_process = {}
if context is None:
context = {}
- self._list_class = list_class or browse_record_list
+ self._list_class = list_class
self._cr = cr
self._uid = uid
self._id = id
self._table = table # deprecated, use _model!
self._model = table
self._table_name = self._table._name
- self.__logger = logging.getLogger(
- 'osv.browse_record.' + self._table_name)
+ self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
self._context = context
self._fields_process = fields_process
cache.setdefault(table._name, {})
self._data = cache[table._name]
- if not (id and isinstance(id, (int, long,))):
- raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
+# if not (id and isinstance(id, (int, long,))):
+# raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
# if not table.exists(cr, uid, id, context):
# raise BrowseRecordError(_('Object %s does not exists') % (self,))
return attr
else:
error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
- self.logger.notifyChannel("browse_record", netsvc.LOG_WARNING, error_msg)
+ self.__logger.warning(error_msg)
raise KeyError(error_msg)
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
if not field_values:
            # Where did those ids come from? Perhaps old entries in ir_model_data?
- self.__logger.warn("No field_values found for ids %s in %s", ids, self)
+ _logger.warning("No field_values found for ids %s in %s", ids, self)
raise KeyError('Field %s not found in %s'%(name, self))
# create browse records for 'remote' objects
for result_line in field_values:
if not name in self._data[self._id]:
# How did this happen? Could be a missing model due to custom fields used too soon, see above.
- self.logger.notifyChannel("browse_record", netsvc.LOG_ERROR,
- "Fields to fetch: %s, Field values: %s"%(field_names, field_values))
- self.logger.notifyChannel("browse_record", netsvc.LOG_ERROR,
- "Cached: %s, Table: %s"%(self._data[self._id], self._table))
+ self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
+ self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
return self._data[self._id][name]
fields.datetime: 'timestamp',
fields.binary: 'bytea',
fields.many2one: 'int4',
+ fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
else:
pg_type = get_pg_type(f, getattr(fields, f._type))
else:
- logging.getLogger('orm').warn('%s type not supported!', field_type)
+ _logger.warning('%s type not supported!', field_type)
pg_type = None
return pg_type
super(MetaModel, self).__init__(name, bases, attrs)
return
- module_name = self.__module__.split('.')[0]
+ # The (OpenERP) module name can be in the `openerp.addons` namespace
+ # or not. For instance module `sale` can be imported as
+ # `openerp.addons.sale` (the good way) or `sale` (for backward
+ # compatibility).
+ module_parts = self.__module__.split('.')
+ if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
+ module_parts[1] == 'addons':
+            module_name = module_parts[2]
+        else:
+            module_name = module_parts[0]
if not hasattr(self, '_module'):
self._module = module_name
_sequence = None
_description = None
+ # dict of {field:method}, with method returning the name_get of records
+ # to include in the _read_group, if grouped on this field
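+    # Illustrative contract (an assumption, inferred from the call in
+    # _read_group_fill_results below): each registered method receives
+    # (self, cr, uid, present_ids, domain, read_group_order=None,
+    # access_rights_uid=None, context=None) and returns the full list of
+    # m2o-like (id, name) pairs to display, including empty groups.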
+ _group_by_full = {}
+
# Transience
_transient = False # True in a TransientModel
_transient_max_count = None
_log_create = False
_sql_constraints = []
_protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
- __logger = logging.getLogger('orm')
- __schema = logging.getLogger('orm.schema')
CONCURRENCY_CHECK_FIELD = '__last_update'
name_id = 'model_'+self._name.replace('.', '_')
cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
if not cr.rowcount:
- cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
+ cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name_id, context['module'], 'ir.model', model_id)
)
for rec in cr.dictfetchall():
cols[rec['name']] = rec
- for (k, f) in self._columns.items():
+ ir_model_fields_obj = self.pool.get('ir.model.fields')
+
+        # Sparse fields should be created at the end, as they depend on their serialization field already existing
+ model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
+ for (k, f) in model_fields:
vals = {
'model_id': model_id,
'model': self._name,
'selectable': (f.selectable and 1) or 0,
'translate': (f.translate and 1) or 0,
'relation_field': (f._type=='one2many' and isinstance(f, fields.one2many)) and f._fields_id or '',
+ 'serialization_field_id': None,
}
+ if getattr(f, 'serialization_field', None):
+ # resolve link to serialization_field if specified by name
+ serialization_field_id = ir_model_fields_obj.search(cr, 1, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
+ if not serialization_field_id:
+ raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
+ vals['serialization_field_id'] = serialization_field_id[0]
+
            # When it's a custom field, it does not contain f.select
if context.get('field_state', 'base') == 'manual':
if context.get('field_name', '') == k:
vals['id'] = id
cr.execute("""INSERT INTO ir_model_fields (
id, model_id, model, name, field_description, ttype,
- relation,view_load,state,select_level,relation_field, translate
+ relation,view_load,state,select_level,relation_field, translate, serialization_field_id
) VALUES (
- %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
+ %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
)""", (
id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
vals['relation'], bool(vals['view_load']), 'base',
- vals['select_level'], vals['relation_field'], bool(vals['translate'])
+ vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
))
if 'module' in context:
name1 = 'field_' + self._table + '_' + k
cr.execute("select name from ir_model_data where name=%s", (name1,))
if cr.fetchone():
name1 = name1 + "_" + str(id)
- cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
+ cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name1, context['module'], 'ir.model.fields', id)
)
else:
cr.commit()
cr.execute("""UPDATE ir_model_fields SET
model_id=%s, field_description=%s, ttype=%s, relation=%s,
- view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s
+ view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
WHERE
model=%s AND name=%s""", (
vals['model_id'], vals['field_description'], vals['ttype'],
vals['relation'], bool(vals['view_load']),
- vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['model'], vals['name']
+ vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
))
break
cr.commit()
for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
parent_model = pool.get(parent_name)
- if not getattr(cls, '_original_module', None) and name == parent_model._name:
- cls._original_module = parent_model._original_module
if not parent_model:
raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
+ if not getattr(cls, '_original_module', None) and name == parent_model._name:
+ cls._original_module = parent_model._original_module
parent_class = parent_model.__class__
nattr = {}
for s in attributes:
for c in new.keys():
if new[c].manual:
del new[c]
+ # Duplicate float fields because they have a .digits
+ # cache (which must be per-registry, not server-wide).
+ for c in new.keys():
+ if new[c]._type == 'float':
+ new[c] = copy.copy(new[c])
if hasattr(new, 'update'):
new.update(cls.__dict__.get(s, {}))
elif s=='_constraints':
name = type(self).__name__.split('.')[0]
msg = "The class %s has to have a _name attribute" % name
- logger = netsvc.Logger()
- logger.notifyChannel('orm', netsvc.LOG_ERROR, msg)
+ _logger.error(msg)
raise except_orm('ValueError', msg)
if not self._description:
#'select': int(field['select_level'])
}
- if field['ttype'] == 'selection':
+ if field['serialization_field_id']:
+ cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
+ attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
+ if field['ttype'] in ['many2one', 'one2many', 'many2many']:
+ attrs.update({'relation': field['relation']})
+ self._columns[field['name']] = fields.sparse(**attrs)
+ elif field['ttype'] == 'selection':
self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
elif field['ttype'] == 'reference':
self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
else:
return False
+ def _get_xml_id(self, cr, uid, r):
+ model_data = self.pool.get('ir.model.data')
+ data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
+ if len(data_ids):
+ d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
+ if d['module']:
+ r = '%s.%s' % (d['module'], d['name'])
+ else:
+ r = d['name']
+ else:
+ postfix = 0
+ while True:
+ n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
+ if not model_data.search(cr, uid, [('name', '=', n)]):
+ break
+ postfix += 1
+ model_data.create(cr, SUPERUSER_ID, {
+ 'name': n,
+ 'model': self._name,
+ 'res_id': r['id'],
+ 'module': '__export__',
+ })
+ r = '__export__.'+n
+ return r
+
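+        # Note: _get_xml_id is used as a plain local helper with an explicit
+        # `self` argument (see the `f[i] == 'id'` and many2many branches below);
+        # it returns an existing "module.name" XML id when one is registered,
+        # or creates and returns a "__export__.<table>_<id>" identifier otherwise.
+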
lines = []
data = map(lambda x: '', range(len(fields)))
done = []
r = row
i = 0
while i < len(f):
+ cols = False
if f[i] == '.id':
r = r['id']
elif f[i] == 'id':
- model_data = self.pool.get('ir.model.data')
- data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
- if len(data_ids):
- d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
- if d['module']:
- r = '%s.%s' % (d['module'], d['name'])
- else:
- r = d['name']
- else:
- postfix = 0
- while True:
- n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
- if not model_data.search(cr, uid, [('name', '=', n)]):
- break
- postfix += 1
- model_data.create(cr, uid, {
- 'name': n,
- 'model': self._name,
- 'res_id': r['id'],
- 'module': '__export__',
- })
- r = n
+ r = _get_xml_id(self, cr, uid, r)
else:
r = r[f[i]]
                # To display the external name of a selection field when it is exported
- cols = False
if f[i] in self._columns.keys():
cols = self._columns[f[i]]
elif f[i] in self._inherit_fields.keys():
if [x for x in fields2 if x]:
break
done.append(fields2)
+ if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
+ data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
+ break
+
for row2 in r:
- lines2 = self.__export_row(cr, uid, row2, fields2,
+ lines2 = row2._model.__export_row(cr, uid, row2, fields2,
context)
if first:
for fpos2 in range(len(fields)):
return {'datas': datas}
def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
- """
- Import given data in given module
+ """Import given data in given module
This method is used when importing data via client menu.
order_line/product_uom_qty,
order_line/product_uom/id (=xml_id)
- This method returns a 4-tuple with the following structure:
+ This method returns a 4-tuple with the following structure::
- * The first item is a return code, it returns either ``-1`` in case o
+ (return_code, errored_resource, error_message, unused)
- :param cr: database cursor
- :param uid: current user id
- :param fields: list of fields
+ * The first item is a return code, it is ``-1`` in case of
+ import error, or the last imported row number in case of success
+ * The second item contains the record data dict that failed to import
+ in case of error, otherwise it's 0
+ * The third item contains an error message string in case of error,
+ otherwise it's 0
+ * The last item is currently unused, with no specific semantics
+
+ :param fields: list of fields to import
:param data: data to import
:param mode: 'init' or 'update' for record creation
:param current_module: module name
:param noupdate: flag for record creation
- :param context: context arguments, like lang, time zone,
:param filename: optional file to store partial import state for recovery
- :returns: 4-tuple of a return code, an errored resource, an error message and ???
- :rtype: (int, dict|0, str|0, ''|0)
+ :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
+ :rtype: (int, dict or 0, str or 0, str or 0)
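+
+        Illustrative call (field names and values are placeholders only)::
+
+            fields = ['name', 'order_line/product_uom_qty']
+            datas = [['SO001', '2.0']]
+            code, bad_row, message, _unused = model.import_data(
+                cr, uid, fields, datas, mode='init')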
"""
if not context:
context = {}
fields = map(fix_import_export_id_paths, fields)
- logger = netsvc.Logger()
ir_model_data_obj = self.pool.get('ir.model.data')
# mode: id (XML id) or .id (database id) or False for name_get
nbrmax = position+1
done = {}
- for i in range(len(fields)):
+ for i, field in enumerate(fields):
res = False
if i >= len(line):
raise Exception(_('Please check that all your lines have %d columns.'
if not line[i]:
continue
- field = fields[i]
if field[:len(prefix)] <> prefix:
if line[i] and skip:
return False
continue
+ field_name = field[len(prefix)]
#set the mode for m2o, o2m, m2m : xml_id/id/name
if len(field) == len(prefix)+1:
return [(6,0,res)]
# ID of the record using a XML ID
- if field[len(prefix)]=='id':
+ if field_name == 'id':
try:
- data_res_id = _get_id(model_name, line[i], current_module, 'id')
+ data_res_id = _get_id(model_name, line[i], current_module)
except ValueError:
pass
xml_id = line[i]
continue
# ID of the record using a database ID
- elif field[len(prefix)]=='.id':
+ elif field_name == '.id':
data_res_id = _get_id(model_name, line[i], current_module, '.id')
continue
+ field_type = fields_def[field_name]['type']
# recursive call for getting children and returning [(0,0,{})] or [(1,ID,{})]
- if fields_def[field[len(prefix)]]['type']=='one2many':
- if field[len(prefix)] in done:
+ if field_type == 'one2many':
+ if field_name in done:
continue
- done[field[len(prefix)]] = True
- relation = fields_def[field[len(prefix)]]['relation']
+ done[field_name] = True
+ relation = fields_def[field_name]['relation']
relation_obj = self.pool.get(relation)
newfd = relation_obj.fields_get( cr, uid, context=context )
pos = position
- res = many_ids(line[i], relation, current_module, mode)
+ res = []
first = 0
while pos < len(datas):
- res2 = process_liness(self, datas, prefix + [field[len(prefix)]], current_module, relation_obj._name, newfd, pos, first)
+ res2 = process_liness(self, datas, prefix + [field_name], current_module, relation_obj._name, newfd, pos, first)
if not res2:
break
(newrow, pos, w2, data_res_id2, xml_id2) = res2
warning += w2
first += 1
- if data_res_id2:
- res.append((4, data_res_id2))
-
if (not newrow) or not reduce(lambda x, y: x or y, newrow.values(), 0):
break
res.append( (data_res_id2 and 1 or 0, data_res_id2 or 0, newrow) )
-
- elif fields_def[field[len(prefix)]]['type']=='many2one':
- relation = fields_def[field[len(prefix)]]['relation']
+ elif field_type == 'many2one':
+ relation = fields_def[field_name]['relation']
res = _get_id(relation, line[i], current_module, mode)
- elif fields_def[field[len(prefix)]]['type']=='many2many':
- relation = fields_def[field[len(prefix)]]['relation']
+ elif field_type == 'many2many':
+ relation = fields_def[field_name]['relation']
res = many_ids(line[i], relation, current_module, mode)
- elif fields_def[field[len(prefix)]]['type'] == 'integer':
+ elif field_type == 'integer':
res = line[i] and int(line[i]) or 0
- elif fields_def[field[len(prefix)]]['type'] == 'boolean':
+ elif field_type == 'boolean':
res = line[i].lower() not in ('0', 'false', 'off')
- elif fields_def[field[len(prefix)]]['type'] == 'float':
+ elif field_type == 'float':
res = line[i] and float(line[i]) or 0.0
- elif fields_def[field[len(prefix)]]['type'] == 'selection':
- for key, val in fields_def[field[len(prefix)]]['selection']:
+ elif field_type == 'selection':
+ for key, val in fields_def[field_name]['selection']:
if tools.ustr(line[i]) in [tools.ustr(key), tools.ustr(val)]:
res = key
break
if line[i] and not res:
- logger.notifyChannel("import", netsvc.LOG_WARNING,
- _("key '%s' not found in selection field '%s'") % \
- (tools.ustr(line[i]), tools.ustr(field[len(prefix)])))
- warning += [_("Key/value '%s' not found in selection field '%s'") % (tools.ustr(line[i]), tools.ustr(field[len(prefix)]))]
+ _logger.warning(
+ _("key '%s' not found in selection field '%s'"),
+ tools.ustr(line[i]), tools.ustr(field_name))
+ warning.append(_("Key/value '%s' not found in selection field '%s'") % (
+ tools.ustr(line[i]), tools.ustr(field_name)))
else:
res = line[i]
- row[field[len(prefix)]] = res or False
+ row[field_name] = res or False
- result = (row, nbrmax, warning, data_res_id, xml_id)
- return result
+ return row, nbrmax, warning, data_res_id, xml_id
fields_def = self.fields_get(cr, uid, context=context)
- if config.get('import_partial', False) and filename:
- data = pickle.load(file(config.get('import_partial')))
-
position = 0
- while position<len(datas):
- res = {}
+ if config.get('import_partial') and filename:
+ with open(config.get('import_partial'), 'rb') as partial_import_file:
+ data = pickle.load(partial_import_file)
+ position = data.get(filename, 0)
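+            # (`import_partial` names a pickle file mapping filename -> last
+            #  committed row; the loop below re-saves that position every 100
+            #  rows, so a crashed import can be resumed where it left off)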
+ while position<len(datas):
(res, position, warning, res_id, xml_id) = \
process_liness(self, datas, [], current_module, self._name, fields_def, position=position)
if len(warning):
cr.rollback()
- return (-1, res, 'Line ' + str(position) +' : ' + '!\n'.join(warning), '')
+ return -1, res, 'Line ' + str(position) +' : ' + '!\n'.join(warning), ''
try:
ir_model_data_obj._update(cr, uid, self._name,
current_module, res, mode=mode, xml_id=xml_id,
noupdate=noupdate, res_id=res_id, context=context)
except Exception, e:
- return (-1, res, 'Line ' + str(position) + ' : ' + tools.ustr(e), '')
+ return -1, res, 'Line ' + str(position) + ' : ' + tools.ustr(e), ''
- if config.get('import_partial', False) and filename and (not (position%100)):
- data = pickle.load(file(config.get('import_partial')))
+ if config.get('import_partial') and filename and (not (position%100)):
+ with open(config.get('import_partial'), 'rb') as partial_import:
+ data = pickle.load(partial_import)
data[filename] = position
- pickle.dump(data, file(config.get('import_partial'), 'wb'))
+ with open(config.get('import_partial'), 'wb') as partial_import:
+ pickle.dump(data, partial_import)
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
cr.commit()
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
- return (position, 0, 0, 0)
+ return position, 0, 0, 0
def get_invalid_fields(self, cr, uid):
return list(self._invalids)
trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('sum'))
if trans:
node.set('sum', trans)
+ if node.get('avg'):
+ trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('avg'))
+ if trans:
+ node.set('avg', trans)
if node.get('help'):
trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('help'))
if trans:
res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None))
msg = "\n * ".join([r[0] for r in res])
msg += "\n\nEither you wrongly customized this view, or some modules bringing those views are not compatible with your current data model"
- netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
+ _logger.error(msg)
raise except_orm('View error', msg)
return arch, fields
"""
_rec_name = self._rec_name
if _rec_name not in self._columns:
- _rec_name = self._columns.keys()[0]
+ _rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
view = etree.Element('tree', string=self._description)
etree.SubElement(view, 'field', name=_rec_name)
if context is None:
context = {}
args = args[:]
- if name:
+ # optimize out the default criterion of ``ilike ''`` that matches everything
+ if not (name == '' and operator == 'ilike'):
args += [(self._rec_name, operator, name)]
access_rights_uid = name_get_uid or user
ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
try:
getattr(self, '_ormcache')
self._ormcache = {}
+ self.pool._any_cache_cleared = True
except AttributeError:
pass
+
+ def _read_group_fill_results(self, cr, uid, domain, groupby, groupby_list, aggregated_fields,
+ read_group_result, read_group_order=None, context=None):
+ """Helper method for filling in empty groups for all possible values of
+ the field being grouped by"""
+
+ # self._group_by_full should map groupable fields to a method that returns
+ # a list of all aggregated values that we want to display for this field,
+ # in the form of a m2o-like pair (key,label).
+ # This is useful to implement kanban views for instance, where all columns
+ # should be displayed even if they don't contain any record.
+
+ # Grab the list of all groups that should be displayed, including all present groups
+ present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
+ all_groups = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
+ read_group_order=read_group_order,
+ access_rights_uid=openerp.SUPERUSER_ID,
+ context=context)
+
+ result_template = dict.fromkeys(aggregated_fields, False)
+ result_template.update({groupby + '_count':0})
+ if groupby_list and len(groupby_list) > 1:
+ result_template.update(__context={'group_by': groupby_list[1:]})
+
+ # Merge the left_side (current results as dicts) with the right_side (all
+ # possible values as m2o pairs). Both lists are supposed to be using the
+ # same ordering, and can be merged in one pass.
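+        # For instance (illustrative): if read_group_result only holds groups
+        # for stages A and C while _group_by_full returns [A, B, C, D], the
+        # merge keeps the A and C aggregates and inserts empty template lines
+        # for B and D, preserving the shared ordering.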
+ result = []
+ known_values = {}
+ def append_left(left_side):
+ grouped_value = left_side[groupby] and left_side[groupby][0]
+ if not grouped_value in known_values:
+ result.append(left_side)
+ known_values[grouped_value] = left_side
+ else:
+ count_attr = groupby + '_count'
+ known_values[grouped_value].update({count_attr: left_side[count_attr]})
+ def append_right(right_side):
+ grouped_value = right_side[0]
+ if not grouped_value in known_values:
+ line = dict(result_template)
+ line.update({
+ groupby: right_side,
+ '__domain': [(groupby,'=',grouped_value)] + domain,
+ })
+ result.append(line)
+ known_values[grouped_value] = line
+ while read_group_result or all_groups:
+ left_side = read_group_result[0] if read_group_result else None
+ right_side = all_groups[0] if all_groups else None
+ assert left_side is None or left_side[groupby] is False \
+ or isinstance(left_side[groupby], (tuple,list)), \
+ 'M2O-like pair expected, got %r' % left_side[groupby]
+ assert right_side is None or isinstance(right_side, (tuple,list)), \
+ 'M2O-like pair expected, got %r' % right_side
+ if left_side is None:
+ append_right(all_groups.pop(0))
+ elif right_side is None:
+ append_left(read_group_result.pop(0))
+ elif left_side[groupby] == right_side:
+ append_left(read_group_result.pop(0))
+ all_groups.pop(0) # discard right_side
+ elif not left_side[groupby] or not left_side[groupby][0]:
+ # left side == "Undefined" entry, not present on right_side
+ append_left(read_group_result.pop(0))
+ else:
+ append_right(all_groups.pop(0))
+ return result
+
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
"""
Get the list of records in list view grouped by the given ``groupby`` fields
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
fget = self.fields_get(cr, uid, fields)
- float_int_fields = filter(lambda x: fget[x]['type'] in ('float', 'integer'), fields)
flist = ''
group_count = group_by = groupby
if groupby:
if fget.get(groupby):
- if fget[groupby]['type'] in ('date', 'datetime'):
- flist = "to_char(%s,'yyyy-mm') as %s " % (qualified_groupby_field, groupby)
- groupby = "to_char(%s,'yyyy-mm')" % (qualified_groupby_field)
- qualified_groupby_field = groupby
+ groupby_type = fget[groupby]['type']
+ if groupby_type in ('date', 'datetime'):
+ qualified_groupby_field = "to_char(%s,'yyyy-mm')" % qualified_groupby_field
+ flist = "%s as %s " % (qualified_groupby_field, groupby)
+ elif groupby_type == 'boolean':
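+                    # NULLs are coalesced to false so that records with an
+                    # unset boolean end up in the same group as explicit False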
+ qualified_groupby_field = "coalesce(%s,false)" % qualified_groupby_field
+ flist = "%s as %s " % (qualified_groupby_field, groupby)
else:
flist = qualified_groupby_field
else:
raise except_orm(_('Invalid group_by'),
_('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(groupby,))
-
- fields_pre = [f for f in float_int_fields if
- f == self.CONCURRENCY_CHECK_FIELD
- or (f in self._columns and getattr(self._columns[f], '_classic_write'))]
- for f in fields_pre:
- if f not in ['id', 'sequence']:
- group_operator = fget[f].get('group_operator', 'sum')
- if flist:
- flist += ', '
- qualified_field = '"%s"."%s"' % (self._table, f)
- flist += "%s(%s) AS %s" % (group_operator, qualified_field, f)
+ aggregated_fields = [
+ f for f in fields
+ if f not in ('id', 'sequence')
+ if fget[f]['type'] in ('integer', 'float')
+ if (f in self._columns and getattr(self._columns[f], '_classic_write'))]
+ for f in aggregated_fields:
+ group_operator = fget[f].get('group_operator', 'sum')
+ if flist:
+ flist += ', '
+ qualified_field = '"%s"."%s"' % (self._table, f)
+ flist += "%s(%s) AS %s" % (group_operator, qualified_field, f)
gb = groupby and (' GROUP BY ' + qualified_groupby_field) or ''
alldata[r['id']] = r
del r['id']
- data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=orderby or groupby, context=context)
+ order = orderby or groupby
+ data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=order, context=context)
        # the IDs of records whose groupby field value is False or '' should be sorted too
data_ids += filter(lambda x:x not in data_ids, alldata.keys())
data = self.read(cr, uid, data_ids, groupby and [groupby] or ['id'], context=context)
del alldata[d['id']][groupby]
d.update(alldata[d['id']])
del d['id']
+
+ if groupby and groupby in self._group_by_full:
+ data = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
+ aggregated_fields, data, read_group_order=order,
+ context=context)
+
return data
def _inherits_join_add(self, current_table, parent_model_name, query):
def _parent_store_compute(self, cr):
if not self._parent_store:
return
- logger = netsvc.Logger()
- logger.notifyChannel('data', netsvc.LOG_INFO, 'Computing parent left and right for table %s...' % (self._table, ))
+ _logger.info('Computing parent left and right for table %s...', self._table)
def browse_rec(root, pos=0):
# TODO: set order
where = self._parent_name+'='+str(root)
return True
def _update_store(self, cr, f, k):
- logger = netsvc.Logger()
- logger.notifyChannel('data', netsvc.LOG_INFO, "storing computed values of fields.function '%s'" % (k,))
+ _logger.info("storing computed values of fields.function '%s'", k)
ss = self._columns[k]._symbol_set
update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
cr.execute('select id from '+self._table)
elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
return
raise except_orm(_('ValidateError'),
- _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
+ _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
def _check_removed_columns(self, cr, log=False):
# iterate on the database columns to drop the NOT NULL constraints
for column in cr.dictfetchall():
if log:
- self.__logger.debug("column %s is in the table %s but not in the corresponding object %s",
- column['attname'], self._table, self._name)
+ _logger.debug("column %s is in the table %s but not in the corresponding object %s",
+ column['attname'], self._table, self._name)
if column['attnotnull']:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
- self.__schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
- self._table, column['attname'])
+ _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
+ self._table, column['attname'])
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
# So unless stated otherwise we default them to ondelete=cascade.
ondelete = ondelete or 'cascade'
self._foreign_keys.append((self._table, source_field, dest_model._table, ondelete or 'set null'))
- self.__schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
- self._table, source_field, dest_model._table, ondelete)
+ _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
+ self._table, source_field, dest_model._table, ondelete)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
self._foreign_keys.append((source_table, source_field, dest_model._table, ondelete or 'set null'))
- self.__schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
- source_table, source_field, dest_model._table, ondelete)
+ _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
+ source_table, source_field, dest_model._table, ondelete)
+
+ def _drop_constraint(self, cr, source_table, constraint_name):
+        cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table, constraint_name))
+
+ def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
+ # Find FK constraint(s) currently established for the m2o field,
+ # and see whether they are stale or not
+ cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
+ cl2.relname as foreign_table
+ FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
+ pg_attribute as att1, pg_attribute as att2
+ WHERE con.conrelid = cl1.oid
+ AND cl1.relname = %s
+ AND con.confrelid = cl2.oid
+ AND array_lower(con.conkey, 1) = 1
+ AND con.conkey[1] = att1.attnum
+ AND att1.attrelid = cl1.oid
+ AND att1.attname = %s
+ AND array_lower(con.confkey, 1) = 1
+ AND con.confkey[1] = att2.attnum
+ AND att2.attrelid = cl2.oid
+ AND att2.attname = %s
+ AND con.contype = 'f'""", (source_table, source_field, 'id'))
+ constraints = cr.dictfetchall()
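+        # pg_constraint.confdeltype holds a one-letter ON DELETE code;
+        # POSTGRES_CONFDELTYPES maps the field's textual ondelete keyword to
+        # that code so the existing rule can be compared with the declared one
+        # ('a', PostgreSQL's NO ACTION, being the fallback).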
+ if constraints:
+ if len(constraints) == 1:
+ # Is it the right constraint?
+ cons, = constraints
+ if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
+ or cons['foreign_table'] != dest_model._table:
+ _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
+ source_table, cons['constraint_name'])
+ self._drop_constraint(cr, source_table, cons['constraint_name'])
+ self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
+ # else it's all good, nothing to do!
+ else:
+ # Multiple FKs found for the same field, drop them all, and re-create
+ for cons in constraints:
+ _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
+ source_table, cons['constraint_name'])
+ self._drop_constraint(cr, source_table, cons['constraint_name'])
+ self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
+
+
def _auto_init(self, cr, context=None):
"""
cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
res['attname'] = k
column_data[k] = res
- self.__schema.debug("Table '%s': renamed column '%s' to '%s'",
- self._table, f.oldname, k)
+ _schema.debug("Table '%s': renamed column '%s' to '%s'",
+ self._table, f.oldname, k)
# The field already exists in database. Possibly
# change its type, rename it, drop it or change its
f_pg_notnull = res['attnotnull']
if isinstance(f, fields.function) and not f.store and\
not getattr(f, 'nodrop', False):
- self.__logger.info('column %s (%s) in table %s removed: converted to a function !\n',
- k, f.string, self._table)
+ _logger.info('column %s (%s) in table %s removed: converted to a function !\n',
+ k, f.string, self._table)
cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
cr.commit()
- self.__schema.debug("Table '%s': dropped column '%s' with cascade",
- self._table, k)
+ _schema.debug("Table '%s': dropped column '%s' with cascade",
+ self._table, k)
f_obj_type = None
else:
f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
cr.commit()
- self.__schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
+ _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
self._table, k, f_pg_size, f.size)
for c in casts:
if (f_pg_type==c[0]) and (f._type==c[1]):
cr.execute(('UPDATE "%s" SET "%s"=temp_change_size'+c[3]) % (self._table, k))
cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
cr.commit()
- self.__schema.debug("Table '%s': column '%s' changed type from %s to %s",
+ _schema.debug("Table '%s': column '%s' changed type from %s to %s",
self._table, k, c[0], c[1])
break
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
- self.__schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
+ _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
self._table, k, f_pg_type, f._type, newname)
# if the field is required and hasn't got a NOT NULL constraint
try:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
cr.commit()
- self.__schema.debug("Table '%s': column '%s': added NOT NULL constraint",
- self._table, k)
+ _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
+ self._table, k)
except Exception:
msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
"If you want to have it, you should update the records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
- self.__schema.warn(msg, self._table, k, self._table, k)
+ _schema.warning(msg, self._table, k, self._table, k)
cr.commit()
elif not f.required and f_pg_notnull == 1:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.commit()
- self.__schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
- self._table, k)
+ _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
+ self._table, k)
# Verify index
indexname = '%s_%s_index' % (self._table, k)
cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
"This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
" because there is a length limit for indexable btree values!\n"\
"Use a search view instead if you simply want to make the field searchable."
- self.__schema.warn(msg, self._table, k, f._type)
+ _schema.warning(msg, self._table, k, f._type)
if res2 and not f.select:
cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
cr.commit()
msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
- self.__schema.debug(msg, self._table, k, f._type)
+ _schema.debug(msg, self._table, k, f._type)
if isinstance(f, fields.many2one):
dest_model = self.pool.get(f._obj)
- ref = dest_model._table
- if ref != 'ir_actions':
- cr.execute('SELECT confdeltype, conname FROM pg_constraint as con, pg_class as cl1, pg_class as cl2, '
- 'pg_attribute as att1, pg_attribute as att2 '
- 'WHERE con.conrelid = cl1.oid '
- 'AND cl1.relname = %s '
- 'AND con.confrelid = cl2.oid '
- 'AND cl2.relname = %s '
- 'AND array_lower(con.conkey, 1) = 1 '
- 'AND con.conkey[1] = att1.attnum '
- 'AND att1.attrelid = cl1.oid '
- 'AND att1.attname = %s '
- 'AND array_lower(con.confkey, 1) = 1 '
- 'AND con.confkey[1] = att2.attnum '
- 'AND att2.attrelid = cl2.oid '
- 'AND att2.attname = %s '
- "AND con.contype = 'f'", (self._table, ref, k, 'id'))
- res2 = cr.dictfetchall()
- if res2:
- if res2[0]['confdeltype'] != POSTGRES_CONFDELTYPES.get((f.ondelete or 'set null').upper(), 'a'):
- cr.execute('ALTER TABLE "' + self._table + '" DROP CONSTRAINT "' + res2[0]['conname'] + '"')
- self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
- cr.commit()
- self.__schema.debug("Table '%s': column '%s': XXX",
- self._table, k)
+ if dest_model._table != 'ir_actions':
+ self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
# The field doesn't exist in database. Create it if necessary.
else:
# add the missing field
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
- self.__schema.debug("Table '%s': added column '%s' with definition=%s",
+ _schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, k, get_pg_type(f)[1])
# initialize it
query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
cr.execute(query, (ss[1](default),))
cr.commit()
- netsvc.Logger().notifyChannel('data', netsvc.LOG_DEBUG, "Table '%s': setting default value of new column %s" % (self._table, k))
+ _logger.debug("Table '%s': setting default value of new column %s", self._table, k)
# remember the functions to call for the stored fields
if isinstance(f, fields.function):
try:
cr.commit()
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
- self.__schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
+ _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
self._table, k)
except Exception:
msg = "WARNING: unable to set column %s of table %s not null !\n"\
"Try to re-run: openerp-server --update=module\n"\
"If it doesn't work, update records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
- self.__logger.warn(msg, k, self._table, self._table, k)
+ _logger.warning(msg, k, self._table, self._table, k)
cr.commit()
else:
def _create_table(self, cr):
cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
- self.__schema.debug("Table '%s': created", self._table)
+ _schema.debug("Table '%s': created", self._table)
def _parent_columns_exist(self, cr):
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
if 'parent_left' not in self._columns:
- self.__logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
- self._table)
- self.__schema.debug("Table '%s': added column '%s' with definition=%s",
- self._table, 'parent_left', 'INTEGER')
+ _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
+ self._table)
+ _schema.debug("Table '%s': added column '%s' with definition=%s",
+ self._table, 'parent_left', 'INTEGER')
elif not self._columns['parent_left'].select:
- self.__logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
- self._table)
+ _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
+ self._table)
if 'parent_right' not in self._columns:
- self.__logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
- self._table)
- self.__schema.debug("Table '%s': added column '%s' with definition=%s",
- self._table, 'parent_right', 'INTEGER')
+ _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
+ self._table)
+ _schema.debug("Table '%s': added column '%s' with definition=%s",
+ self._table, 'parent_right', 'INTEGER')
elif not self._columns['parent_right'].select:
- self.__logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
- self._table)
+ _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
+ self._table)
if self._columns[self._parent_name].ondelete != 'cascade':
- self.__logger.error("The column %s on object %s must be set as ondelete='cascade'",
- self._parent_name, self._name)
+ _logger.error("The column %s on object %s must be set as ondelete='cascade'",
+ self._parent_name, self._name)
cr.commit()
if not cr.rowcount:
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
cr.commit()
- self.__schema.debug("Table '%s': added column '%s' with definition=%s",
- self._table, field, field_def)
+ _schema.debug("Table '%s': added column '%s' with definition=%s",
+ self._table, field, field_def)
def _select_column_data(self, cr):
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
cr.commit()
- self.__schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
+ _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
def _add_sql_constraints(self, cr):
_sql_constraints.
"""
+ def unify_cons_text(txt):
+ return txt.lower().replace(', ',',').replace(' (','(')
+
for (key, con, _) in self._sql_constraints:
conname = '%s_%s' % (self._table, key)
# constraint does not exists:
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
- elif con.lower() not in [item['condef'].lower() for item in existing_constraints]:
+ elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
# constraint exists but its definition has changed:
sql_actions['drop']['execute'] = True
sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
try:
cr.execute(sql_action['query'])
cr.commit()
- self.__schema.debug(sql_action['msg_ok'])
+ _schema.debug(sql_action['msg_ok'])
except:
- self.__schema.warn(sql_action['msg_err'])
+ _schema.warning(sql_action['msg_err'])
cr.rollback()
def _inherits_check(self):
for table, field_name in self._inherits.items():
if field_name not in self._columns:
- logging.getLogger('init').info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.' % (field_name, self._name))
+ _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
required=True, ondelete="cascade")
elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() != "cascade":
- logging.getLogger('init').warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.' % (field_name, self._name))
+ _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.', field_name, self._name)
self._columns[field_name].required = True
self._columns[field_name].ondelete = "cascade"
return "date_trunc('second', %s) as %s" % (f_qual, f)
if f == self.CONCURRENCY_CHECK_FIELD:
if self._log_access:
- return "COALESCE(%s.write_date, %s.create_date, now())::timestamp AS %s" % (self._table, self._table, f,)
- return "now()::timestamp AS %s" % (f,)
+ return "COALESCE(%s.write_date, %s.create_date, (now() at time zone 'UTC'))::timestamp AS %s" % (self._table, self._table, f,)
+ return "(now() at time zone 'UTC')::timestamp AS %s" % (f,)
if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
return 'length(%s) as "%s"' % (f_qual, f)
return f_qual
return
if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
return
- check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, now())::timestamp)"
+ check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
for sub_ids in cr.split_for_in_conditions(ids):
ids_to_check = []
for id in sub_ids:
self.check_unlink(cr, uid)
- properties = self.pool.get('ir.property')
+ ir_property = self.pool.get('ir.property')
+
+ # Check if the records are used as default properties.
domain = [('res_id', '=', False),
('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
]
- if properties.search(cr, uid, domain, context=context):
+ if ir_property.search(cr, uid, domain, context=context):
raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
+ # Delete the records' properties.
+ property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
+ ir_property.unlink(cr, uid, property_ids, context=context)
+
wf_service = netsvc.LocalService("workflow")
for oid in ids:
wf_service.trg_delete(uid, self._name, oid, cr)
if readonly[0][0] >= 1:
edit = True
break
- elif readonly[0][0] == 0:
- edit = False
- else:
- edit = False
if not edit:
vals.pop(field)
if self._log_access:
upd0.append('write_uid=%s')
- upd0.append('write_date=now()')
+ upd0.append("write_date=(now() at time zone 'UTC')")
upd1.append(user)
if len(upd0):
for id in ids:
result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
+ unknown_fields = updend[:]
for table in self._inherits:
col = self._inherits[table]
nids = []
for val in updend:
if self._inherit_fields[val][0] == table:
v[val] = vals[val]
+ unknown_fields.remove(val)
if v:
self.pool.get(table).write(cr, user, nids, v, context)
+ if unknown_fields:
+ _logger.warning(
+ 'No such field(s) in model %s: %s.',
+ self._name, ', '.join(unknown_fields))
self._validate(cr, user, ids, context)
# TODO: use _order to set dest at the right position and not first node of parent
tocreate[v] = {'id': vals[self._inherits[v]]}
(upd0, upd1, upd2) = ('', '', [])
upd_todo = []
+ unknown_fields = []
for v in vals.keys():
- if v in self._inherit_fields:
+ if v in self._inherit_fields and v not in self._columns:
(table, col, col_detail, original_parent) = self._inherit_fields[v]
tocreate[table][v] = vals[v]
del vals[v]
else:
if (v not in self._inherit_fields) and (v not in self._columns):
del vals[v]
+ unknown_fields.append(v)
+ if unknown_fields:
+ _logger.warning(
+ 'No such field(s) in model %s: %s.',
+ self._name, ', '.join(unknown_fields))
        # Try-except added to filter the creation of those records whose fields are read-only.
        # Example: any dashboard in which all the fields are read-only (due to database views).
del vals[self._inherits[table]]
record_id = tocreate[table].pop('id', None)
-
+
+ # When linking/creating parent records, force context without 'no_store_function' key that
+ # defers stored functions computing, as these won't be computed in batch at the end of create().
+ parent_context = dict(context)
+ parent_context.pop('no_store_function', None)
+
if record_id is None or not record_id:
- record_id = self.pool.get(table).create(cr, user, tocreate[table], context=context)
+ record_id = self.pool.get(table).create(cr, user, tocreate[table], context=parent_context)
else:
- self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=context)
+ self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=parent_context)
upd0 += ',' + self._inherits[table]
upd1 += ',%s'
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
upd0 += ',create_uid,create_date'
- upd1 += ',%s,now()'
+ upd1 += ",%s,(now() at time zone 'UTC')"
upd2.append(user)
cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2))
self.check_access_rule(cr, user, [id_new], 'create', context=context)
"""Fetch records as objects allowing to use dot notation to browse fields and relations
:param cr: database cursor
- :param user: current user id
+ :param uid: current user id
:param select: id or list of ids.
:param context: context arguments, like lang, time zone
:rtype: object or list of objects requested
domain = domain[:]
# if the object has a field named 'active', filter out all inactive
        # records unless they were explicitly asked for
- if 'active' in self._columns and (active_test and context.get('active_test', True)):
+ if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
if domain:
- active_in_args = False
- for a in domain:
- if a[0] == 'active':
- active_in_args = True
- if not active_in_args:
+ # the item[0] trick below works for domain items and '&'/'|'/'!'
+ # operators too
+ if not any(item[0] == 'active' for item in domain):
domain.insert(0, ('active', '=', 1))
else:
domain = [('active', '=', 1)]
assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
- logging.getLogger('orm.search').debug("Many2one function/related fields must be stored " \
- "to be used as ordering fields! Ignoring sorting for %s.%s",
- self._name, order_field)
+ _logger.debug("Many2one function/related fields must be stored " \
+ "to be used as ordering fields! Ignoring sorting for %s.%s",
+ self._name, order_field)
return
# figure out the applicable order_by for the m2o
:type default: dictionary
:param context: context arguments, like lang, time zone
:type context: dictionary
- :return: True
+ :return: id of the newly created record
"""
if context is None:
return [x[0] for x in cr.fetchall()]
def check_recursion(self, cr, uid, ids, context=None, parent=None):
- warnings.warn("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
- self._name, DeprecationWarning, stacklevel=3)
+        _logger.warning("You are using deprecated %s.check_recursion(). Please use '_check_recursion()' instead!",
+            self._name)
assert parent is None or parent in self._columns or parent in self._inherit_fields,\
"The 'parent' parameter passed to check_recursion() must be None or a valid field name"
return self._check_recursion(cr, uid, ids, context, parent)
def _transient_clean_rows_older_than(self, cr, seconds):
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
cr.execute("SELECT id FROM " + self._table + " WHERE"
- " COALESCE(write_date, create_date, now())::timestamp <"
- " (now() - interval %s)", ("%s seconds" % seconds,))
+ " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp <"
+ " ((now() at time zone 'UTC') - interval %s)", ("%s seconds" % seconds,))
ids = [x[0] for x in cr.fetchall()]
self.unlink(cr, SUPERUSER_ID, ids)
def _transient_clean_old_rows(self, cr, count):
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
cr.execute(
- "SELECT id, COALESCE(write_date, create_date, now())::timestamp"
+ "SELECT id, COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
" AS t FROM " + self._table +
" ORDER BY t LIMIT %s", (count,))
ids = [x[0] for x in cr.fetchall()]