# - functions
#
#
-
import calendar
import copy
import datetime
import logging
+import operator
import pickle
-import random
import re
import string
-import sys
import time
import traceback
import types
-import fields
import netsvc
-import tools
+from lxml import etree
+from tools.config import config
from tools.translate import _
-import copy
-import sys
-
-try:
- from lxml import etree
-except ImportError:
- sys.stderr.write("ERROR: Import lxml module\n")
- sys.stderr.write("ERROR: Try to install the python-lxml package\n")
-
-from tools.config import config
+import fields
+import tools
+from tools.safe_eval import safe_eval as eval
regex_order = re.compile('^(([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
def intersect(la, lb):
    """Return the elements of ``la`` that also appear in ``lb``.

    Order and duplicates of ``la`` are preserved.  ``lb`` is converted to a
    set once, so each membership test is O(1) instead of the O(len(lb))
    linear scan of the previous filter/lambda version.  Elements must be
    hashable (callers pass field-name strings).
    """
    lb_set = set(lb)
    return [x for x in la if x in lb_set]
-
class except_orm(Exception):
def __init__(self, name, value):
self.name = name
def __getitem__(self, name):
if name == 'id':
return self._id
+
if name not in self._data[self._id]:
# build the list of fields we will fetch
elif name in self._table._inherit_fields:
col = self._table._inherit_fields[name][2]
elif hasattr(self._table, str(name)):
- if isinstance(getattr(self._table, name), (types.MethodType, types.LambdaType, types.FunctionType)):
- return lambda *args, **argv: getattr(self._table, name)(self._cr, self._uid, [self._id], *args, **argv)
+ attr = getattr(self._table, name)
+
+ if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
+ return lambda *args, **argv: attr(self._cr, self._uid, [self._id], *args, **argv)
else:
- return getattr(self._table, name)
+ return attr
else:
self.logger.notifyChannel("browse_record", netsvc.LOG_WARNING,
"Field '%s' does not exist in object '%s': \n%s" % (
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
if col._prefetch:
# gen the list of "local" (ie not inherited) fields which are classic or many2one
- ffields = filter(lambda x: x[1]._classic_write, self._table._columns.items())
+ fields_to_fetch = filter(lambda x: x[1]._classic_write, self._table._columns.items())
# gen the list of inherited fields
inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
# complete the field list with the inherited fields which are classic or many2one
- ffields += filter(lambda x: x[1]._classic_write, inherits)
+ fields_to_fetch += filter(lambda x: x[1]._classic_write, inherits)
# otherwise we fetch only that field
else:
- ffields = [(name, col)]
+ fields_to_fetch = [(name, col)]
ids = filter(lambda id: name not in self._data[id], self._data.keys())
- # read the data
- fffields = map(lambda x: x[0], ffields)
- datas = self._table.read(self._cr, self._uid, ids, fffields, context=self._context, load="_classic_write")
+ # read the results
+ field_names = map(lambda x: x[0], fields_to_fetch)
+ field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
if self._fields_process:
lang = self._context.get('lang', 'en_US') or 'en_US'
lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid,[('code','=',lang)])
raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid,lang_obj_ids[0])
- for n, f in ffields:
- if f._type in self._fields_process:
- for d in datas:
- d[n] = self._fields_process[f._type](d[n])
- if d[n]:
- d[n].set_value(self._cr, self._uid, d[n], self, f, lang_obj)
-
+ for field_name, field_column in fields_to_fetch:
+ if field_column._type in self._fields_process:
+ for result_line in field_values:
+ result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
+ if result_line[field_name]:
+ result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)
- if not datas:
+ if not field_values:
# Where did those ids come from? Perhaps old entries in ir_model_dat?
- self.__logger.warn("No datas found for ids %s in %s",
- ids, self)
+ self.__logger.warn("No field_values found for ids %s in %s", ids, self)
raise KeyError('Field %s not found in %s'%(name,self))
# create browse records for 'remote' objects
- for data in datas:
- if len(str(data['id']).split('-')) > 1:
- data['id'] = int(str(data['id']).split('-')[0])
+ for result_line in field_values:
new_data = {}
- for n, f in ffields:
- if f._type in ('many2one', 'one2one'):
- if data[n]:
- obj = self._table.pool.get(f._obj)
- compids = False
- if type(data[n]) in (type([]),type( (1,) )):
- ids2 = data[n][0]
+ for field_name, field_column in fields_to_fetch:
+ if field_column._type in ('many2one', 'one2one'):
+ if result_line[field_name]:
+ obj = self._table.pool.get(field_column._obj)
+ if isinstance(result_line[field_name], (list,tuple)):
+ value = result_line[field_name][0]
else:
- ids2 = data[n]
- if ids2:
+ value = result_line[field_name]
+ if value:
# FIXME: this happen when a _inherits object
# overwrite a field of it parent. Need
# testing to be sure we got the right
# object and not the parent one.
- if not isinstance(ids2, browse_record):
- new_data[n] = browse_record(self._cr,
- self._uid, ids2, obj, self._cache,
+ if not isinstance(value, browse_record):
+ new_data[field_name] = browse_record(self._cr,
+ self._uid, value, obj, self._cache,
context=self._context,
list_class=self._list_class,
fields_process=self._fields_process)
+ else:
+ new_data[field_name] = value
else:
- new_data[n] = browse_null()
+ new_data[field_name] = browse_null()
else:
- new_data[n] = browse_null()
- elif f._type in ('one2many', 'many2many') and len(data[n]):
- new_data[n] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(f._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in data[n]], self._context)
- elif f._type in ('reference'):
- if data[n]:
- if isinstance(data[n], browse_record):
- new_data[n] = data[n]
+ new_data[field_name] = browse_null()
+ elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
+ new_data[field_name] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in result_line[field_name]], self._context)
+ elif field_column._type in ('reference'):
+ if result_line[field_name]:
+ if isinstance(result_line[field_name], browse_record):
+ new_data[field_name] = result_line[field_name]
else:
- ref_obj, ref_id = data[n].split(',')
+ ref_obj, ref_id = result_line[field_name].split(',')
ref_id = long(ref_id)
obj = self._table.pool.get(ref_obj)
- compids = False
- new_data[n] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
+ new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
else:
- new_data[n] = browse_null()
+ new_data[field_name] = browse_null()
else:
- new_data[n] = data[n]
- self._data[data['id']].update(new_data)
+ new_data[field_name] = result_line[field_name]
+ self._data[result_line['id']].update(new_data)
+
if not name in self._data[self._id]:
#how did this happen?
self.logger.notifyChannel("browse_record", netsvc.LOG_ERROR,
- "Ffields: %s, datas: %s"%(fffields, datas))
+ "Fields to fetch: %s, Field values: %s"%(field_names, field_values))
self.logger.notifyChannel("browse_record", netsvc.LOG_ERROR,
- "Data: %s, Table: %s"%(self._data[self._id], self._table))
+ "Cached: %s, Table: %s"%(self._data[self._id], self._table))
raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
return self._data[self._id][name]
return "browse_record(%s, %d)" % (self._table_name, self._id)
def __eq__(self, other):
    """Two browse records are equal when they reference the same model
    (``_table_name``) and the same database id.

    Guard against arbitrary objects first: without the isinstance check,
    comparing a browse_record to e.g. False or an int raised
    AttributeError on ``other._table_name``.
    """
    if not isinstance(other, browse_record):
        return False
    return (self._table_name, self._id) == (other._table_name, other._id)
def __ne__(self, other):
    """Inverse of __eq__: anything that is not a browse_record of the
    same model/id compares as different.

    The isinstance guard mirrors __eq__ and avoids AttributeError when
    ``other`` is not a browse_record at all.
    """
    if not isinstance(other, browse_record):
        return True
    return (self._table_name, self._id) != (other._table_name, other._id)
# we need to define __unicode__ even though we've already defined __str__
f_type = ('int4', 'INTEGER')
else:
f_type = ('varchar', 'VARCHAR(%d)' % f_size)
- elif isinstance(f, fields.function) and eval('fields.'+(f._type)) in type_dict:
- t = eval('fields.'+(f._type))
+ elif isinstance(f, fields.function) and eval('fields.'+(f._type),globals()) in type_dict:
+ t = eval('fields.'+(f._type), globals())
f_type = (type_dict[t], type_dict[t])
elif isinstance(f, fields.function) and f._type == 'float':
if f.digits:
_inherits = {}
_table = None
_invalids = set()
+ _log_create = False
CONCURRENCY_CHECK_FIELD = '__last_update'
def log(self, cr, uid, id, message, secondary=False, context=None):
    """Record *message* in res.log for record *id* of this model.

    Returns the id of the newly created res.log entry.
    """
    log_entry = {
        'name': message,
        'res_model': self._name,
        'res_id': id,
        'secondary': secondary,
    }
    return self.pool.get('res.log').create(cr, uid, log_entry, context=context)
def view_init(self, cr, uid, fields_list, context=None):
    """Hook invoked when a view on this object is opened.

    Does nothing by default; models override it to run view-specific
    initialization.
    """
    return None
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None):
    """Grouped read is not supported on this base object.

    Raise a real exception instance: ``raise <string>`` is invalid
    (only BaseException subclasses may be raised), so callers could
    never catch the old form cleanly.
    """
    raise NotImplementedError(_('The read_group method is not implemented on this object !'))
def _field_create(self, cr, context={}):
cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
'name': k,
'field_description': f.string.replace("'", " "),
'ttype': f._type,
- 'relation': f._obj or 'NULL',
+ 'relation': f._obj or '',
'view_load': (f.view_load and 1) or 0,
'select_level': tools.ustr(f.select or 0),
'readonly':(f.readonly and 1) or 0,
return browse_null()
def __export_row(self, cr, uid, row, fields, context=None):
+ if context is None:
+ context = {}
def check_type(field_type):
if field_type == 'float':
cols = selection_field(self._inherits)
if cols and cols._type == 'selection':
sel_list = cols.selection
- if type(sel_list) == type([]):
- r = [x[1] for x in sel_list if r==x[0]][0]
-
+ if r and type(sel_list) == type([]):
+ r = [x[1] for x in sel_list if r==x[0]]
+ r = r and r[0] or False
if not r:
if f[i] in self._columns:
r = check_type(self._columns[f[i]]._type)
for rr in r :
if isinstance(rr.name, browse_record):
rr = rr.name
- rr_name = self.pool.get(rr._table_name).name_get(cr, uid, [rr.id])
+ rr_name = self.pool.get(rr._table_name).name_get(cr, uid, [rr.id], context=context)
rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
dt += tools.ustr(rr_name or '') + ','
data[fpos] = dt[:-1]
i += 1
if i == len(f):
if isinstance(r, browse_record):
- r = self.pool.get(r._table_name).name_get(cr, uid, [r.id])
+ r = self.pool.get(r._table_name).name_get(cr, uid, [r.id], context=context)
r = r and r[0] and r[0][1] or ''
data[fpos] = tools.ustr(r or '')
return [data] + lines
This method is used when exporting data via client menu
"""
- if not context:
+ if context is None:
context = {}
imp_comp = context.get('import_comp',False)
cols = self._columns.copy()
module, xml_id = line[i].rsplit('.', 1)
else:
module, xml_id = current_module, line[i]
- id = ir_model_data_obj._get_id(cr, uid, module, xml_id)
-
- res_res_id = ir_model_data_obj.read(cr, uid, [id],
- ['res_id'])
- if res_res_id:
- res_id = res_res_id[0]['res_id']
+ record_id = ir_model_data_obj._get_id(cr, uid, module, xml_id)
+ ir_model_data = ir_model_data_obj.read(cr, uid, [record_id], ['res_id'])
+ if ir_model_data:
+ res_id = ir_model_data[0]['res_id']
+ else:
+ raise ValueError('No references to %s.%s' % (module, xml_id))
row[field[-1][:-3]] = res_id or False
continue
if (len(field) == len(prefix)+1) and \
msg = _('Insertion Failed! ' + e[1])
return (-1, res, 'Line ' + str(counter) +' : ' + msg, '' )
#Raising Uncaught exception
- raise
+ return (-1, res, 'Line ' + str(counter) +' : ' + str(e), '' )
+
for lang in translate:
context2 = context.copy()
context2['lang'] = lang
data = pickle.load(file(config.get('import_partial')))
data[filename] = initial_size - len(datas) + original_value
pickle.dump(data, file(config.get('import_partial'),'wb'))
+ if context.get('defer_parent_store_computation'):
+ self._parent_store_compute(cr)
cr.commit()
#except Exception, e:
#
# TODO: Send a request with the result and multi-thread !
#
+ if context.get('defer_parent_store_computation'):
+ self._parent_store_compute(cr)
return (done, 0, 0, 0)
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
    """Not implemented on this base object; concrete models provide it.

    Raise NotImplementedError instead of a bare translated string,
    which is not a valid exception object.
    """
    raise NotImplementedError(_('The read method is not implemented on this object !'))
def get_invalid_fields(self, cr, uid):
    """Return the fields currently flagged as invalid, as a plain list."""
    return [field for field in self._invalids]
for constraint in self._constraints:
fun, msg, fields = constraint
if not fun(self, cr, uid, ids):
- translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, source=msg) or msg
+ # Check presence of __call__ directly instead of using
+ # callable() because it will be deprecated as of Python 3.0
+ if hasattr(msg, '__call__'):
+ txt_msg, params = msg(self, cr, uid, ids)
+ tmp_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, source=txt_msg) or txt_msg
+ translated_msg = tmp_msg % params
+ else:
+ translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, source=msg) or msg
error_msgs.append(
_("Error occurred while validating the field(s) %s: %s") % (','.join(fields), translated_msg)
)
def default_get(self, cr, uid, fields_list, context=None):
    """
    Returns default values for the fields in fields_list.

    :param fields_list: list of fields to get the default values for
        (example ['field1', 'field2',])
    :type fields_list: list
    :param context: usual context dictionary - it may contain keys in the
        form ``default_XXX``, where XXX is a field name to set or override
        a default value.
    :return: dictionary of the default values (set on the object model
        class, through user preferences, or in the context)
    """
    # trigger view init hook
    self.view_init(cr, uid, fields_list, context)

    if not context:
        context = {}
    defaults = {}

    # get the default values for the inherited fields
    for t in self._inherits.keys():
        defaults.update(self.pool.get(t).default_get(cr, uid, fields_list,
            context))

    # get the default values defined in the object
    for f in fields_list:
        if f in self._defaults:
            # a default may be a callable (computed per call) or a constant
            if callable(self._defaults[f]):
                defaults[f] = self._defaults[f](self, cr, uid, context)
            else:
                defaults[f] = self._defaults[f]

        # resolve the column object: own column first, then inherited
        fld_def = ((f in self._columns) and self._columns[f]) \
                or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
                or False

        if isinstance(fld_def, fields.property):
            # property fields take their default from ir.property
            property_obj = self.pool.get('ir.property')
            prop_value = property_obj.get(cr, uid, f, self._name, context=context)
            if prop_value:
                if isinstance(prop_value, (browse_record, browse_null)):
                    # defaults must carry ids, not record objects
                    defaults[f] = prop_value.id
                else:
                    defaults[f] = prop_value
            else:
                if f not in defaults:
                    defaults[f] = False

    # get the default values set by the user and override the default
    # values defined in the object
    ir_values_obj = self.pool.get('ir.values')
    res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
    for id, field, field_value in res:
        if field in fields_list:
            fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
            if fld_def._type in ('many2one', 'one2one'):
                obj = self.pool.get(fld_def._obj)
                # skip user defaults pointing to a record that no longer exists
                if not obj.search(cr, uid, [('id', '=', field_value or False)]):
                    continue
            if fld_def._type in ('many2many'):
                obj = self.pool.get(fld_def._obj)
                # keep only the target ids that still exist
                field_value2 = []
                for i in range(len(field_value)):
                    if not obj.search(cr, uid, [('id', '=',
                        field_value[i])]):
                        continue
                    field_value2.append(field_value[i])
                field_value = field_value2
            if fld_def._type in ('one2many'):
                obj = self.pool.get(fld_def._obj)
                # filter nested m2o/o2o values of each sub-record the same way
                field_value2 = []
                for i in range(len(field_value)):
                    field_value2.append({})
                    for field2 in field_value[i]:
                        if field2 in obj._columns.keys() and obj._columns[field2]._type in ('many2one', 'one2one'):
                            obj2 = self.pool.get(obj._columns[field2]._obj)
                            if not obj2.search(cr, uid,
                                    [('id', '=', field_value[i][field2])]):
                                continue
                        elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type in ('many2one', 'one2one'):
                            obj2 = self.pool.get(obj._inherit_fields[field2][2]._obj)
                            if not obj2.search(cr, uid,
                                    [('id', '=', field_value[i][field2])]):
                                continue
                        # TODO add test for many2many and one2many
                        field_value2[i][field2] = field_value[i][field2]
                field_value = field_value2
            defaults[field] = field_value

    # get the default values from the context (``default_XXX`` keys win last)
    for key in context or {}:
        if key.startswith('default_') and (key[8:] in fields_list):
            defaults[key[8:]] = context[key]
    return defaults
+
def perm_read(self, cr, user, ids, context=None, details=True):
    """Not implemented on this base object; concrete models provide it.

    Raise NotImplementedError instead of a bare translated string,
    which is not a valid exception object.
    """
    raise NotImplementedError(_('The perm_read method is not implemented on this object !'))
def unlink(self, cr, uid, ids, context=None):
    """Not implemented on this base object; concrete models provide it.

    Raise NotImplementedError instead of a bare translated string,
    which is not a valid exception object.
    """
    raise NotImplementedError(_('The unlink method is not implemented on this object !'))
def write(self, cr, user, ids, vals, context=None):
    """Not implemented on this base object; concrete models provide it.

    Raise NotImplementedError instead of a bare translated string,
    which is not a valid exception object.
    """
    raise NotImplementedError(_('The write method is not implemented on this object !'))
def create(self, cr, user, vals, context=None):
    """Not implemented on this base object; concrete models provide it.

    Raise NotImplementedError instead of a bare translated string,
    which is not a valid exception object.
    """
    raise NotImplementedError(_('The create method is not implemented on this object !'))
def fields_get_keys(self, cr, user, context=None):
    """Return the names of every field of this model, including the
    fields inherited through ``_inherits`` parents.

    The removed variant forwarded an undefined name (``fields``) to the
    recursive call -- a guaranteed NameError -- and carried an unused
    ``read_access`` parameter.  ``keys()`` is materialized with list()
    so ``extend`` is always available on the result.
    """
    res = list(self._columns.keys())
    for parent in self._inherits:
        res.extend(self.pool.get(parent).fields_get_keys(cr, user, context))
    return res
- def fields_get(self, cr, user, fields=None, context=None, read_access=True):
+ # returns the definition of each field in the object
+ # the optional fields parameter can limit the result to some fields
+ def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
if context is None:
context = {}
res = {}
translation_obj = self.pool.get('ir.translation')
- model_access_obj = self.pool.get('ir.model.access')
for parent in self._inherits:
- res.update(self.pool.get(parent).fields_get(cr, user, fields, context))
+ res.update(self.pool.get(parent).fields_get(cr, user, allfields, context))
if self._columns.keys():
for f in self._columns.keys():
- if fields and f not in fields:
+ if allfields and f not in allfields:
continue
res[f] = {'type': self._columns[f]._type}
+ # This additional attributes for M2M and function field is added
+ # because we need to display tooltip with this additional information
+ # when client is started in debug mode.
+ if isinstance(self._columns[f], fields.function):
+ res[f]['function'] = self._columns[f]._fnct and self._columns[f]._fnct.func_name or False
+ res[f]['store'] = self._columns[f].store
+ if isinstance(self._columns[f].store, dict):
+ res[f]['store'] = str(self._columns[f].store)
+ res[f]['fnct_search'] = self._columns[f]._fnct_search and self._columns[f]._fnct_search.func_name or False
+ res[f]['fnct_inv'] = self._columns[f]._fnct_inv and self._columns[f]._fnct_inv.func_name or False
+ res[f]['fnct_inv_arg'] = self._columns[f]._fnct_inv_arg or False
+ res[f]['func_obj'] = self._columns[f]._obj or False
+ res[f]['func_method'] = self._columns[f]._method
+ if isinstance(self._columns[f], fields.many2many):
+ res[f]['related_columns'] = list((self._columns[f]._id1, self._columns[f]._id2))
+ res[f]['third_table'] = self._columns[f]._rel
for arg in ('string', 'readonly', 'states', 'size', 'required', 'group_operator',
- 'change_default', 'translate', 'help', 'select', 'selectable','parent_field'):
+ 'change_default', 'translate', 'help', 'select', 'selectable'):
if getattr(self._columns[f], arg):
res[f][arg] = getattr(self._columns[f], arg)
- if not read_access:
+ if not write_access:
res[f]['readonly'] = True
res[f]['states'] = {}
for arg in ('digits', 'invisible','filters'):
if getattr(self._columns[f], arg, None):
res[f][arg] = getattr(self._columns[f], arg)
- #TODO: optimize
- res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context.get('lang', False) or 'en_US')
+ res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context.get('lang', False) or 'en_US', self._columns[f].string)
if res_trans:
res[f]['string'] = res_trans
help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context.get('lang', False) or 'en_US')
#TODO : read the fields from the database
pass
- if fields:
+ if allfields:
# filter out fields which aren't in the fields list
for r in res.keys():
- if r not in fields:
+ if r not in allfields:
del res[r]
return res
fields = {}
childs = True
- if node.tag == 'field':
+ def encode(s):
+ if isinstance(s, unicode):
+ return s.encode('utf8')
+ return s
+
+ # return True if node can be displayed to current user
+ def check_group(node):
+ if node.get('groups'):
+ groups = node.get('groups').split(',')
+ access_pool = self.pool.get('ir.model.access')
+ can_see = any(access_pool.check_groups(cr, user, group) for group in groups)
+ if not can_see:
+ node.set('invisible', '1')
+ if 'attrs' in node.attrib:
+ del(node.attrib['attrs']) #avoid making field visible later
+ del(node.attrib['groups'])
+ return can_see
+ else:
+ return True
+
+ if node.tag in ('field', 'node', 'arrow'):
+ if node.get('object'):
+ attrs = {}
+ views = {}
+ xml = "<form>"
+ for f in node:
+ if f.tag in ('field'):
+ xml += etree.tostring(f, encoding="utf-8")
+ xml += "</form>"
+ new_xml = etree.fromstring(encode(xml))
+ ctx = context.copy()
+ ctx['base_model_name'] = self._name
+ xarch, xfields = self.pool.get(node.get('object',False)).__view_look_dom_arch(cr, user, new_xml, view_id, ctx)
+ views[str(f.tag)] = {
+ 'arch': xarch,
+ 'fields': xfields
+ }
+ attrs = {'views': views}
+ view = False
+ fields = views.get('field',False) and views['field'].get('fields',False)
if node.get('name'):
attrs = {}
try:
column = False
if column:
- relation = column._obj
+ relation = self.pool.get(column._obj)
+
childs = False
views = {}
for f in node:
node.remove(f)
ctx = context.copy()
ctx['base_model_name'] = self._name
- xarch, xfields = self.pool.get(relation).__view_look_dom_arch(cr, user, f, view_id, ctx)
+ xarch, xfields = relation.__view_look_dom_arch(cr, user, f, view_id, ctx)
views[str(f.tag)] = {
'arch': xarch,
'fields': xfields
}
attrs = {'views': views}
if node.get('widget') and node.get('widget') == 'selection':
+ if not check_group(node):
+ name = node.get('name')
+ default = self.default_get(cr, user, [name], context=context).get(name)
+ if default:
+ attrs['selection'] = relation.name_get(cr, 1, [default], context=context)
+ else:
+ attrs['selection'] = []
# We can not use the 'string' domain has it is defined according to the record !
- dom = []
- if column._domain and not isinstance(column._domain, (str, unicode)):
- dom = column._domain
- dom += eval(node.get('domain','[]'), {'uid':user, 'time':time})
- context.update(eval(node.get('context','{}')))
- attrs['selection'] = self.pool.get(relation).name_search(cr, user, '', dom, context=context)
- if (node.get('required') and not int(node.get('required'))) or not column.required:
- attrs['selection'].append((False,''))
+ else:
+ # If domain and context are strings, we keep them for client-side, otherwise
+ # we evaluate them server-side to consider them when generating the list of
+ # possible values
+ # TODO: find a way to remove this hack, by allow dynamic domains
+ dom = []
+ if column._domain and not isinstance(column._domain, basestring):
+ dom = column._domain
+ dom += eval(node.get('domain','[]'), {'uid':user, 'time':time})
+ search_context = dict(context)
+ if column._context and not isinstance(column._context, basestring):
+ search_context.update(column._context)
+ attrs['selection'] = relation._name_search(cr, 1, '', dom, context=search_context, limit=None, name_get_uid=1)
+ if (node.get('required') and not int(node.get('required'))) or not column.required:
+ attrs['selection'].append((False,''))
fields[node.get('name')] = attrs
elif node.tag in ('form', 'tree'):
fields[node.get(additional_field)] = {}
if 'groups' in node.attrib:
- if node.get('groups'):
- groups = node.get('groups').split(',')
- readonly = False
- access_pool = self.pool.get('ir.model.access')
- for group in groups:
- readonly = readonly or access_pool.check_groups(cr, user, group)
- if not readonly:
- node.set('invisible', '1')
- del(node.attrib['groups'])
+ check_group(node)
# translate view
if ('lang' in context) and not result:
arch = etree.tostring(node, encoding="utf-8").replace('\t', '')
- #code for diagram view.
fields={}
if node.tag=='diagram':
if node.getchildren()[0].tag=='node':
fields[key]=value
else:
fields = self.fields_get(cr, user, fields_def.keys(), context)
-
for field in fields_def:
if field == 'id':
# sometime, the view may containt the (invisible) field 'id' needed for a domain (when 2 objects have cross references)
sql_res = False
while ok:
view_ref = context.get(view_type + '_view_ref', False)
- if view_ref:
+ if view_ref and not view_id:
if '.' in view_ref:
module, view_ref = view_ref.split('.', 1)
cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
view_id = view_ref_res[0]
if view_id:
- where = (model and (" and model='%s'" % (self._name,))) or ''
- cr.execute('SELECT arch,name,field_parent,id,type,inherit_id FROM ir_ui_view WHERE id=%s'+where, (view_id,))
+ query = "SELECT arch,name,field_parent,id,type,inherit_id FROM ir_ui_view WHERE id=%s"
+ params = (view_id,)
+ if model:
+ query += " AND model=%s"
+ params += (self._name,)
+ cr.execute(query, params)
else:
cr.execute('''SELECT
arch,name,field_parent,id,type,inherit_id
def search(self, cr, user, args, offset=0, limit=None, order=None,
        context=None, count=False):
    """Not implemented on this base object; concrete models provide it.

    Raise NotImplementedError instead of a bare translated string,
    which is not a valid exception object.
    """
    raise NotImplementedError(_('The search method is not implemented on this object !'))
def name_get(self, cr, user, ids, context=None):
    """Not implemented on this base object; concrete models provide it.

    Raise NotImplementedError instead of a bare translated string,
    which is not a valid exception object.
    """
    raise NotImplementedError(_('The name_get method is not implemented on this object !'))
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
    """Not implemented on this base object; concrete models provide it.

    Raise NotImplementedError instead of a bare translated string,
    which is not a valid exception object.
    """
    raise NotImplementedError(_('The name_search method is not implemented on this object !'))
def copy(self, cr, uid, id, default=None, context=None):
    """Not implemented on this base object; concrete models provide it.

    Raise NotImplementedError instead of a bare translated string,
    which is not a valid exception object.
    """
    raise NotImplementedError(_('The copy method is not implemented on this object !'))
def exists(self, cr, uid, id, context=None):
    """Not implemented on this base object; concrete models provide it.

    Raise NotImplementedError instead of a bare translated string,
    which is not a valid exception object.
    """
    raise NotImplementedError(_('The exists method is not implemented on this object !'))
def read_string(self, cr, uid, id, langs, fields=None, context=None):
res = {}
for lang in langs:
for field in vals:
if field in self._columns:
- self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field])
+ src = self._columns[field].string
+ self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), vals)
if cols:
self.check_id = 0
cr.execute('delete from wkf_instance where res_type=%s', (self._name,))
+ def _check_access(self, uid, object_id, mode):
+ if uid != 1 and self.datas[object_id]['internal.create_uid'] != uid:
+ raise except_orm(_('AccessError'), '%s access is only allowed on your own records for osv_memory objects except for the super-user' % mode.capitalize())
+
def vaccum(self, cr, uid):
self.check_id += 1
if self.check_id % self._check_time:
for id in self.datas:
if self.datas[id]['internal.date_access'] < max:
tounlink.append(id)
- self.unlink(cr, uid, tounlink)
+ self.unlink(cr, 1, tounlink)
if len(self.datas)>self._max_count:
sorted = map(lambda x: (x[1]['internal.date_access'], x[0]), self.datas.items())
sorted.sort()
for id in ids:
r = {'id': id}
for f in fields_to_read:
- if id in self.datas:
- r[f] = self.datas[id].get(f, False)
+ record = self.datas.get(id)
+ if record:
+ self._check_access(user, id, 'read')
+ r[f] = record.get(f, False)
if r[f] and isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
r[f] = len(r[f])
result.append(r)
vals2[field] = vals[field]
else:
upd_todo.append(field)
- for id_new in ids:
- self.datas[id_new].update(vals2)
- self.datas[id_new]['internal.date_access'] = time.time()
+ for object_id in ids:
+ self._check_access(user, object_id, mode='write')
+ self.datas[object_id].update(vals2)
+ self.datas[object_id]['internal.date_access'] = time.time()
for field in upd_todo:
- self._columns[field].set_memory(cr, self, id_new, field, vals[field], user, context)
- self._validate(cr, user, [id_new], context)
+ self._columns[field].set_memory(cr, self, object_id, field, vals[field], user, context)
+ self._validate(cr, user, [object_id], context)
wf_service = netsvc.LocalService("workflow")
- wf_service.trg_write(user, self._name, id_new, cr)
- return id_new
+ wf_service.trg_write(user, self._name, object_id, cr)
+ return object_id
def create(self, cr, user, vals, context=None):
self.vaccum(cr, user)
self.next_id += 1
id_new = self.next_id
- default = []
- for f in self._columns.keys():
- if not f in vals:
- default.append(f)
- if len(default):
- vals.update(self.default_get(cr, user, default, context))
+
+ # override defaults with the provided values, never allow the other way around
+ defaults = self.default_get(cr, user, [], context)
+ defaults.update(vals)
+ vals = defaults
+
vals2 = {}
upd_todo = []
for field in vals:
upd_todo.append(field)
self.datas[id_new] = vals2
self.datas[id_new]['internal.date_access'] = time.time()
+ self.datas[id_new]['internal.create_uid'] = user
for field in upd_todo:
self._columns[field].set_memory(cr, self, id_new, field, vals[field], user, context)
self._validate(cr, user, [id_new], context)
+ if self._log_create and not (context and context.get('no_store_function', False)):
+ message = self._description + \
+ " '" + \
+ self.name_get(cr, user, [id_new], context=context)[0][1] + \
+ "' "+ _("created.")
+ self.log(cr, user, id_new, message, True, context=context)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_create(user, self._name, id_new, cr)
return id_new
- def default_get(self, cr, uid, fields_list, context=None):
- self.view_init(cr, uid, fields_list, context)
- if not context:
- context = {}
- value = {}
- # get the default values for the inherited fields
- for f in fields_list:
- if f in self._defaults:
- if callable(self._defaults[f]):
- value[f] = self._defaults[f](self, cr, uid, context)
- else:
- value[f] = self._defaults[f]
-
- fld_def = ((f in self._columns) and self._columns[f]) \
- or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
- or False
-
- # get the default values set by the user and override the default
- # values defined in the object
- ir_values_obj = self.pool.get('ir.values')
- res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
- for id, field, field_value in res:
- if field in fields_list:
- fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
- if fld_def._type in ('many2one', 'one2one'):
- obj = self.pool.get(fld_def._obj)
- if not obj.search(cr, uid, [('id', '=', field_value)]):
- continue
- if fld_def._type in ('many2many'):
- obj = self.pool.get(fld_def._obj)
- field_value2 = []
- for i in range(len(field_value)):
- if not obj.search(cr, uid, [('id', '=',
- field_value[i])]):
- continue
- field_value2.append(field_value[i])
- field_value = field_value2
- if fld_def._type in ('one2many'):
- obj = self.pool.get(fld_def._obj)
- field_value2 = []
- for i in range(len(field_value)):
- field_value2.append({})
- for field2 in field_value[i]:
- if obj._columns[field2]._type in ('many2one', 'one2one'):
- obj2 = self.pool.get(obj._columns[field2]._obj)
- if not obj2.search(cr, uid,
- [('id', '=', field_value[i][field2])]):
- continue
- # TODO add test for many2many and one2many
- field_value2[i][field2] = field_value[i][field2]
- field_value = field_value2
- value[field] = field_value
-
- # get the default values from the context
- for key in context or {}:
- if key.startswith('default_') and (key[8:] in fields_list):
- value[key[8:]] = context[key]
- return value
-
def _where_calc(self, cr, user, args, active_test=True, context=None):
if not context:
context = {}
import expression
e = expression.expression(args)
e.parse(cr, user, self, context)
- res=e.__dict__['_expression__exp']
+ res = e.exp
return res or []
-
def search(self, cr, user, args, offset=0, limit=None, order=None,
context=None, count=False):
if not context:
context = {}
+
+ # implicit filter on current user except for superuser
+ if user != 1:
+ if not args:
+ args = []
+ args.insert(0, ('internal.create_uid', '=', user))
+
result = self._where_calc(cr, user, args, context=context)
if result==[]:
return self.datas.keys()
if result:
for id, data in self.datas.items():
counter=counter+1
- data['id'] = id
- if limit and (counter >int(limit)):
+ data['id'] = id
+ if limit and (counter > int(limit)):
break
f = True
for arg in result:
- if arg[1] =='=':
- val =eval('data[arg[0]]'+'==' +' arg[2]')
- elif arg[1] in ['<','>','in','not in','<=','>=','<>']:
- val =eval('data[arg[0]]'+arg[1] +' arg[2]')
- elif arg[1] in ['ilike']:
- if str(data[arg[0]]).find(str(arg[2]))!=-1:
- val= True
- else:
- val=False
-
- if f and val:
- f = True
- else:
- f = False
+ if arg[1] == '=':
+ val = eval('data[arg[0]]'+'==' +' arg[2]', locals())
+ elif arg[1] in ['<','>','in','not in','<=','>=','<>']:
+ val = eval('data[arg[0]]'+arg[1] +' arg[2]', locals())
+ elif arg[1] in ['ilike']:
+ val = (str(data[arg[0]]).find(str(arg[2]))!=-1)
+
+ f = f and val
+
if f:
res.append(id)
if count:
def unlink(self, cr, uid, ids, context=None):
for id in ids:
- if id in self.datas:
- del self.datas[id]
+ self._check_access(uid, id, 'unlink')
+ self.datas.pop(id, None)
if len(ids):
- cr.execute('delete from wkf_instance where res_type=%s and res_id = ANY (%s)', (self._name,ids))
+ cr.execute('delete from wkf_instance where res_type=%s and res_id IN %s', (self._name, tuple(ids)))
return True
def perm_read(self, cr, user, ids, context=None, details=True):
result = []
+ credentials = self.pool.get('res.users').name_get(cr, user, [user])[0]
+ create_date = time.strftime('%Y-%m-%d %H:%M:%S')
for id in ids:
+ self._check_access(user, id, 'read')
result.append({
- 'create_uid': (user, 'Root'),
- 'create_date': time.strftime('%Y-%m-%d %H:%M:%S'),
+ 'create_uid': credentials,
+ 'create_date': create_date,
'write_uid': False,
'write_date': False,
'id': id
* if user tries to bypass access rules for read on the requested object
"""
- groupby_list = groupby
- if isinstance(groupby, list):
- groupby = groupby[0]
context = context or {}
self.pool.get('ir.model.access').check(cr, uid, self._name, 'read', context=context)
if not fields:
fields = self._columns.keys()
- (where_clause, where_params, tables) = self._where_calc(cr, uid, domain, context=context)
- dom = self.pool.get('ir.rule').domain_get(cr, uid, self._name, 'read', context=context)
- where_clause = where_clause + dom[0]
- where_params = where_params + dom[1]
- for t in dom[2]:
- if t not in tables:
- tables.append(t)
+ # compute the where, order by, limit and offset clauses
+ (where_clause, where_clause_params, tables) = self._where_calc(cr, uid, domain, context=context)
+
+ # apply direct ir.rules from current model
+ self._apply_ir_rules(cr, uid, where_clause, where_clause_params, tables, 'read', context=context)
+
+ # then apply the ir.rules from the parents (through _inherits), adding the appropriate JOINs if needed
+ for inherited_model in self._inherits:
+ previous_tables = list(tables)
+ if self._apply_ir_rules(cr, uid, where_clause, where_clause_params, tables, 'read', model_name=inherited_model, context=context):
+ # if some rules were applied, need to add the missing JOIN for them to make sense, passing the previous
+ # list of table in case the inherited table was not in the list before (as that means the corresponding
+ # JOIN(s) was(were) not present)
+ self._inherits_join_add(inherited_model, previous_tables, where_clause)
+ tables = list(set(tables).union(set(previous_tables)))
# Take care of adding join(s) if groupby is an '_inherits'ed field
- tables, where_clause = self._inherits_join_calc(groupby,tables,where_clause)
+ groupby_list = groupby
+ if groupby:
+            if isinstance(groupby, list):
+ groupby = groupby[0]
+ tables, where_clause, qfield = self._inherits_join_calc(groupby,tables,where_clause)
if len(where_clause):
where_clause = ' where '+string.join(where_clause, ' and ')
float_int_fields = filter(lambda x: fget[x]['type'] in ('float','integer'), fields)
sum = {}
+ flist = ''
group_by = groupby
- if fget.get(groupby,False) and fget[groupby]['type'] in ('date','datetime'):
- flist = "to_char(%s,'yyyy-mm') as %s "%(groupby,groupby)
- groupby = "to_char(%s,'yyyy-mm')"%(groupby)
- else:
- flist = groupby
+ if groupby:
+ if fget.get(groupby,False) and fget[groupby]['type'] in ('date','datetime'):
+ flist = "to_char(%s,'yyyy-mm') as %s "%(groupby,groupby)
+ groupby = "to_char(%s,'yyyy-mm')"%(groupby)
+ else:
+ flist = groupby
+
fields_pre = [f for f in float_int_fields if
f == self.CONCURRENCY_CHECK_FIELD
for f in fields_pre:
if f not in ['id','sequence']:
operator = fget[f].get('group_operator','sum')
- flist += ','+operator+'('+f+') as '+f
+ if flist:
+ flist += ','
+ flist += operator+'('+f+') as '+f
- cr.execute('select min(%s.id) as id,' % self._table + flist + ' from ' + ','.join(tables) + where_clause + ' group by '+ groupby + limit_str + offset_str, where_params)
+ if groupby:
+ gb = ' group by '+groupby
+ else:
+ gb = ''
+ cr.execute('select min(%s.id) as id,' % self._table + flist + ' from ' + ','.join(tables) + where_clause + gb + limit_str + offset_str, where_clause_params)
alldata = {}
groupby = group_by
for r in cr.dictfetchall():
if val == None:r[fld] = False
alldata[r['id']] = r
del r['id']
- data = self.read(cr, uid, alldata.keys(), [groupby], context=context)
- today = datetime.date.today()
-
+ data = self.read(cr, uid, alldata.keys(), groupby and [groupby] or ['id'], context=context)
for d in data:
- d['__domain'] = [(groupby,'=',alldata[d['id']][groupby] or False)] + domain
- if not isinstance(groupby_list,(str, unicode)):
- d['__context'] = {'group_by':groupby_list[1:]}
- if fget.has_key(groupby):
+ if groupby:
+ d['__domain'] = [(groupby,'=',alldata[d['id']][groupby] or False)] + domain
+ if not isinstance(groupby_list,(str, unicode)):
+ if groupby or not context.get('group_by_no_leaf', False):
+ d['__context'] = {'group_by':groupby_list[1:]}
+ if groupby and fget.has_key(groupby):
if d[groupby] and fget[groupby]['type'] in ('date','datetime'):
- dt = datetime.datetime.strptime(alldata[d['id']][groupby][:7],'%Y-%m')
- days = calendar.monthrange(dt.year, dt.month)[1]
+ dt = datetime.datetime.strptime(alldata[d['id']][groupby][:7],'%Y-%m')
+ days = calendar.monthrange(dt.year, dt.month)[1]
- d[groupby] = datetime.datetime.strptime(d[groupby][:10],'%Y-%m-%d').strftime('%B %Y')
- d['__domain'] = [(groupby,'>=',alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-01','%Y-%m-%d').strftime('%Y-%m-%d') or False),\
- (groupby,'<=',alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-' + str(days),'%Y-%m-%d').strftime('%Y-%m-%d') or False)] + domain
- elif fget[groupby]['type'] == 'many2one':
- d[groupby] = d[groupby] and ((type(d[groupby])==type(1)) and d[groupby] or d[groupby][1]) or ''
-
- del alldata[d['id']][groupby]
+ d[groupby] = datetime.datetime.strptime(d[groupby][:10],'%Y-%m-%d').strftime('%B %Y')
+ d['__domain'] = [(groupby,'>=',alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-01','%Y-%m-%d').strftime('%Y-%m-%d') or False),\
+ (groupby,'<=',alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-' + str(days),'%Y-%m-%d').strftime('%Y-%m-%d') or False)] + domain
+ del alldata[d['id']][groupby]
d.update(alldata[d['id']])
del d['id']
return data
- def _inherits_join_calc(self, field, tables, where_clause):
+ def _inherits_join_add(self, parent_model_name, tables, where_clause):
"""
- Adds missing table select and join clause(s) for reaching
- the field coming from an '_inherits' parent table.
+ Add missing table SELECT and JOIN clause for reaching the parent table (no duplicates)
- :param tables: list of table._table names enclosed in double quotes as returned
- by _where_calc()
+ :param parent_model_name: name of the parent model for which the clauses should be added
+ :param tables: list of table._table names enclosed in double quotes as returned
+ by _where_calc()
+        :param where_clause: current list of WHERE clause conditions (SQL fragments, not the query parameters), extended in place
+ """
+ inherits_field = self._inherits[parent_model_name]
+ parent_model = self.pool.get(parent_model_name)
+ parent_table_name = parent_model._table
+ quoted_parent_table_name = '"%s"' % parent_table_name
+ if quoted_parent_table_name not in tables:
+ tables.append(quoted_parent_table_name)
+ where_clause.append('("%s".%s = %s.id)' % (self._table, inherits_field, parent_table_name))
+ return (tables, where_clause)
+ def _inherits_join_calc(self, field, tables, where_clause):
+ """
+ Adds missing table select and join clause(s) for reaching
+ the field coming from an '_inherits' parent table (no duplicates).
+
+ :param tables: list of table._table names enclosed in double quotes as returned
+ by _where_calc()
+        :param where_clause: current list of WHERE clause conditions (SQL fragments, not the query parameters), extended in place
+ :return: (table, where_clause, qualified_field) where ``table`` and ``where_clause`` are the updated
+ versions of the parameters, and ``qualified_field`` is the qualified name of ``field``
+ in the form ``table.field``, to be referenced in queries.
"""
current_table = self
while field in current_table._inherit_fields and not field in current_table._columns:
- parent_table = self.pool.get(current_table._inherit_fields[field][0])
- parent_table_name = parent_table._table
- if '"%s"'%parent_table_name not in tables:
- tables.append('"%s"'%parent_table_name)
- where_clause.append('(%s.%s = %s.id)' % (current_table._table, current_table._inherits[parent_table._name], parent_table_name))
+ parent_model_name = current_table._inherit_fields[field][0]
+ parent_table = self.pool.get(parent_model_name)
+ self._inherits_join_add(parent_model_name, tables, where_clause)
current_table = parent_table
- return (tables, where_clause)
+ return (tables, where_clause, '"%s".%s' % (current_table._table, field))
def _parent_store_compute(self, cr):
+ if not self._parent_store:
+ return
logger = netsvc.Logger()
logger.notifyChannel('orm', netsvc.LOG_INFO, 'Computing parent left and right for table %s...' % (self._table, ))
def browse_rec(root, pos=0):
columns += ('id', 'write_uid', 'write_date', 'create_uid', 'create_date') # openerp access columns
cr.execute("SELECT a.attname, a.attnotnull"
" FROM pg_class c, pg_attribute a"
- " WHERE c.relname=%%s"
+ " WHERE c.relname=%s"
" AND c.oid=a.attrelid"
- " AND a.attisdropped=%%s"
+ " AND a.attisdropped=%s"
" AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
- " AND a.attname NOT IN (%s)" % ",".join(['%s']*len(columns)),
- [self._table, False] + columns)
+                   " AND a.attname NOT IN %s", (self._table, False, tuple(columns)))
+
for column in cr.dictfetchall():
if log:
logger.notifyChannel("orm", netsvc.LOG_DEBUG, "column %s is in the table %s but not in the corresponding object %s" % (column['attname'], self._table, self._name))
todo_end = []
self._field_create(cr, context=context)
if getattr(self, '_auto', True):
- cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname='%s'" % self._table)
+ cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s" ,( self._table,))
if not cr.rowcount:
- cr.execute("CREATE TABLE \"%s\" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS" % self._table)
+ cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
cr.execute("COMMENT ON TABLE \"%s\" IS '%s'" % (self._table, self._description.replace("'","''")))
create = True
cr.commit()
logger.notifyChannel('orm', netsvc.LOG_ERROR, 'create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)' % (self._table, ))
if 'parent_right' not in self._columns:
logger.notifyChannel('orm', netsvc.LOG_ERROR, 'create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)' % (self._table, ))
- if self._columns[self._parent_name].ondelete<>'cascade':
- logger.notifyChannel('orm', netsvc.LOG_ERROR, "the columns %s on object must be set as ondelete='cascasde'" % (self._name, self._parent_name))
+ if self._columns[self._parent_name].ondelete != 'cascade':
+ logger.notifyChannel('orm', netsvc.LOG_ERROR, "The column %s on object %s must be set as ondelete='cascade'" % (self._parent_name, self._name))
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
cr.commit()
if not res:
cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY (%s) REFERENCES "%s" ON DELETE SET NULL' % (self._obj, f._fields_id, f._table))
elif isinstance(f, fields.many2many):
- cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname=%s", (f._rel,))
+ cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (f._rel,))
if not cr.dictfetchall():
if not self.pool.get(f._obj):
raise except_orm('Programming Error', ('There is no reference available for %s') % (f._obj,))
res = res_old
res[0]['attname'] = k
- if not res:
- if not isinstance(f, fields.function) or f.store:
- # add the missing field
- cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
- cr.execute("COMMENT ON COLUMN %s.%s IS '%s'" % (self._table, k, f.string.replace("'","''")))
-
- # initialize it
- if not create and k in self._defaults:
- if callable(self._defaults[k]):
- default = self._defaults[k](self, cr, 1, context)
- else:
- default = self._defaults[k]
-
- ss = self._columns[k]._symbol_set
- query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
- cr.execute(query, (ss[1](default),))
- cr.commit()
- logger.notifyChannel('orm', netsvc.LOG_DEBUG, 'setting default value of new column %s of table %s'% (k, self._table))
- elif not create:
- logger.notifyChannel('orm', netsvc.LOG_DEBUG, 'creating new column %s of table %s'% (k, self._table))
-
- if isinstance(f, fields.function):
- order = 10
- if f.store is not True:
- order = f.store[f.store.keys()[0]][2]
- todo_update_store.append((order, f,k))
-
- # and add constraints if needed
- if isinstance(f, fields.many2one):
- if not self.pool.get(f._obj):
- raise except_orm('Programming Error', ('There is no reference available for %s') % (f._obj,))
- ref = self.pool.get(f._obj)._table
-# ref = f._obj.replace('.', '_')
- # ir_actions is inherited so foreign key doesn't work on it
- if ref != 'ir_actions':
- cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (self._table, k, ref, f.ondelete))
- if f.select:
- cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
- if f.required:
- try:
- cr.commit()
- cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
- except Exception, e:
- logger.notifyChannel('orm', netsvc.LOG_WARNING, 'WARNING: unable to set column %s of table %s not null !\nTry to re-run: openerp-server.py --update=module\nIf it doesn\'t work, update records and execute manually:\nALTER TABLE %s ALTER COLUMN %s SET NOT NULL' % (k, self._table, self._table, k))
- cr.commit()
- elif len(res)==1:
+ if len(res)==1:
f_pg_def = res[0]
f_pg_type = f_pg_def['typname']
f_pg_size = f_pg_def['size']
('varchar', 'text', 'TEXT', ''),
('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
+ ('timestamp', 'date', 'date', '::date'),
('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
]
- # !!! Avoid reduction of varchar field !!!
if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size < f.size:
- # if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size != f.size:
logger.notifyChannel('orm', netsvc.LOG_INFO, "column '%s' in table '%s' changed size" % (k, self._table))
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" VARCHAR(%d)' % (self._table, k, f.size))
if f_pg_type != f_obj_type:
if not ok:
- logger.notifyChannel('orm', netsvc.LOG_WARNING, "column '%s' in table '%s' has changed type (DB = %s, def = %s) but unable to migrate this change !" % (k, self._table, f_pg_type, f._type))
+ i = 0
+ while True:
+ newname = self._table + '_moved' + str(i)
+ cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
+ "WHERE c.relname=%s " \
+ "AND a.attname=%s " \
+ "AND c.oid=a.attrelid ", (self._table, newname))
+ if not cr.fetchone()[0]:
+ break
+ i+=1
+ logger.notifyChannel('orm', netsvc.LOG_WARNING, "column '%s' in table '%s' has changed type (DB=%s, def=%s), data moved to table %s !" % (k, self._table, f_pg_type, f._type, newname))
+ if f_pg_notnull:
+ cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
+ cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
+ cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
+ cr.execute("COMMENT ON COLUMN %s.%s IS '%s'" % (self._table, k, f.string.replace("'","''")))
# if the field is required and hasn't got a NOT NULL constraint
if f.required and f_pg_notnull == 0:
try:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
cr.commit()
- except Exception, e:
+ except Exception:
logger.notifyChannel('orm', netsvc.LOG_WARNING, 'unable to set a NOT NULL constraint on column %s of the %s table !\nIf you want to have it, you should update the records and execute manually:\nALTER TABLE %s ALTER COLUMN %s SET NOT NULL' % (k, self._table, self._table, k))
cr.commit()
elif not f.required and f_pg_notnull == 1:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.commit()
+
+ # Verify index
indexname = '%s_%s_index' % (self._table, k)
cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
- res = cr.dictfetchall()
- if not res and f.select:
+ res2 = cr.dictfetchall()
+ if not res2 and f.select:
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
cr.commit()
- if res and not f.select:
+ if f._type == 'text':
+ # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
+                            logger.notifyChannel('orm', netsvc.LOG_WARNING, "Adding (b-tree) index for column '%s' of type '%s' in table '%s'.\n"\
+                                "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts because there is a length limit for indexable btree values!\n"\
+                                "Use a search view instead if you simply want to make the field searchable." % (k, f._type, self._table))
+ if res2 and not f.select:
cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
cr.commit()
+ logger.notifyChannel('orm', netsvc.LOG_WARNING, "Dropping index for column '%s' of type '%s' in table '%s' as it is not required anymore" % (k, f._type, self._table))
+
if isinstance(f, fields.many2one):
ref = self.pool.get(f._obj)._table
if ref != 'ir_actions':
'AND att2.attrelid = cl2.oid '
'AND att2.attname = %s '
"AND con.contype = 'f'", (self._table, ref, k, 'id'))
- res = cr.dictfetchall()
- if res:
- if res[0]['confdeltype'] != POSTGRES_CONFDELTYPES.get(f.ondelete.upper(), 'a'):
- cr.execute('ALTER TABLE "' + self._table + '" DROP CONSTRAINT "' + res[0]['conname'] + '"')
+ res2 = cr.dictfetchall()
+ if res2:
+ if res2[0]['confdeltype'] != POSTGRES_CONFDELTYPES.get(f.ondelete.upper(), 'a'):
+ cr.execute('ALTER TABLE "' + self._table + '" DROP CONSTRAINT "' + res2[0]['conname'] + '"')
cr.execute('ALTER TABLE "' + self._table + '" ADD FOREIGN KEY ("' + k + '") REFERENCES "' + ref + '" ON DELETE ' + f.ondelete)
cr.commit()
- else:
+ elif len(res)>1:
logger.notifyChannel('orm', netsvc.LOG_ERROR, "Programming error, column %s->%s has multiple instances !"%(self._table,k))
+ if not res:
+ if not isinstance(f, fields.function) or f.store:
+
+ # add the missing field
+ cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
+ cr.execute("COMMENT ON COLUMN %s.%s IS '%s'" % (self._table, k, f.string.replace("'","''")))
+
+ # initialize it
+ if not create and k in self._defaults:
+ if callable(self._defaults[k]):
+ default = self._defaults[k](self, cr, 1, context)
+ else:
+ default = self._defaults[k]
+
+ ss = self._columns[k]._symbol_set
+ query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
+ cr.execute(query, (ss[1](default),))
+ cr.commit()
+ logger.notifyChannel('orm', netsvc.LOG_DEBUG, 'setting default value of new column %s of table %s'% (k, self._table))
+ elif not create:
+ logger.notifyChannel('orm', netsvc.LOG_DEBUG, 'creating new column %s of table %s'% (k, self._table))
+
+ if isinstance(f, fields.function):
+ order = 10
+ if f.store is not True:
+ order = f.store[f.store.keys()[0]][2]
+ todo_update_store.append((order, f,k))
+
+ # and add constraints if needed
+ if isinstance(f, fields.many2one):
+ if not self.pool.get(f._obj):
+ raise except_orm('Programming Error', ('There is no reference available for %s') % (f._obj,))
+ ref = self.pool.get(f._obj)._table
+# ref = f._obj.replace('.', '_')
+ # ir_actions is inherited so foreign key doesn't work on it
+ if ref != 'ir_actions':
+ cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (self._table, k, ref, f.ondelete))
+ if f.select:
+ cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
+ if f.required:
+ try:
+ cr.commit()
+ cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
+ except Exception:
+ logger.notifyChannel('orm', netsvc.LOG_WARNING, 'WARNING: unable to set column %s of table %s not null !\nTry to re-run: openerp-server.py --update=module\nIf it doesn\'t work, update records and execute manually:\nALTER TABLE %s ALTER COLUMN %s SET NOT NULL' % (k, self._table, self._table, k))
+ cr.commit()
for order,f,k in todo_update_store:
todo_end.append((order, self._update_store, (f, k)))
else:
- cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname=%s", (self._table,))
+ cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
create = not bool(cr.fetchone())
+ cr.commit() # start a new transaction
+
for (key, con, _) in self._sql_constraints:
conname = '%s_%s' % (self._table, key)
cr.execute("SELECT conname FROM pg_constraint where conname=%s", (conname,))
if not cr.dictfetchall():
+ query = 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,)
try:
- cr.execute('alter table "%s" add constraint "%s_%s" %s' % (self._table, self._table, key, con,))
+ cr.execute(query)
cr.commit()
except:
- logger.notifyChannel('orm', netsvc.LOG_WARNING, 'unable to add \'%s\' constraint on table %s !\n If you want to have it, you should update the records and execute manually:\nALTER table %s ADD CONSTRAINT %s_%s %s' % (con, self._table, self._table, self._table, key, con,))
+ logger.notifyChannel('orm', netsvc.LOG_WARNING, 'unable to add \'%s\' constraint on table %s !\n If you want to have it, you should update the records and execute manually:\n%s' % (con, self._table, query))
+ cr.rollback()
if create:
if hasattr(self, "_sql"):
cr.commit()
if store_compute:
self._parent_store_compute(cr)
+ cr.commit()
return todo_end
def __init__(self, cr):
self._columns[field['name']] = getattr(fields, field['ttype'])(field['relation'], _rel_name, 'id1', 'id2', **attrs)
else:
self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
-
+ self._inherits_check()
self._inherits_reload()
if not self._sequence:
self._sequence = self._table+'_id_seq'
for f in self._columns:
self._columns[f].restart()
- def default_get(self, cr, uid, fields_list, context=None):
- """
- To Get default field values of given fields list of the model
-
- :param cr: database cursor
- :param uid: current user id
- :param fields_list: list of fields to get the default value
- :type fields_list: list (example ['field1', 'field2',])
- :param context: context arguments, like lang, time zone
- :return: dictionary of the default values for fields (set on the object class, by the user preferences, or via the context)
-
- """
- if not context:
- context = {}
- value = {}
- # get the default values for the inherited fields
- for t in self._inherits.keys():
- value.update(self.pool.get(t).default_get(cr, uid, fields_list,
- context))
-
- # get the default values defined in the object
- for f in fields_list:
- if f in self._defaults:
- if callable(self._defaults[f]):
- value[f] = self._defaults[f](self, cr, uid, context)
- else:
- value[f] = self._defaults[f]
-
- fld_def = ((f in self._columns) and self._columns[f]) \
- or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
- or False
- if isinstance(fld_def, fields.property):
- property_obj = self.pool.get('ir.property')
- definition_id = fld_def._field_get(cr, uid, self._name, f)
- nid = property_obj.search(cr, uid, [('fields_id', '=',
- definition_id), ('res_id', '=', False)])
- if nid:
- prop_value = property_obj.browse(cr, uid, nid[0],
- context=context).value
- value[f] = (prop_value and int(prop_value.split(',')[1])) \
- or False
-
- # get the default values set by the user and override the default
- # values defined in the object
- ir_values_obj = self.pool.get('ir.values')
- res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
- for id, field, field_value in res:
- if field in fields_list:
- fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
- if fld_def._type in ('many2one', 'one2one'):
- obj = self.pool.get(fld_def._obj)
- if not obj.search(cr, uid, [('id', '=', field_value or False)]):
- continue
- if fld_def._type in ('many2many'):
- obj = self.pool.get(fld_def._obj)
- field_value2 = []
- for i in range(len(field_value)):
- if not obj.search(cr, uid, [('id', '=',
- field_value[i])]):
- continue
- field_value2.append(field_value[i])
- field_value = field_value2
- if fld_def._type in ('one2many'):
- obj = self.pool.get(fld_def._obj)
- field_value2 = []
- for i in range(len(field_value)):
- field_value2.append({})
- for field2 in field_value[i]:
- if obj._columns[field2]._type in ('many2one', 'one2one'):
- obj2 = self.pool.get(obj._columns[field2]._obj)
- if not obj2.search(cr, uid,
- [('id', '=', field_value[i][field2])]):
- continue
- # TODO add test for many2many and one2many
- field_value2[i][field2] = field_value[i][field2]
- field_value = field_value2
- value[field] = field_value
- for key in context or {}:
- if key.startswith('default_') and (key[8:] in fields_list):
- value[key[8:]] = context[key]
- return value
-
#
# Update objects that uses this one to update their _inherits fields
#
self._inherit_fields = res
self._inherits_reload_src()
+ def _inherits_check(self):
+ for table, field_name in self._inherits.items():
+ if field_name not in self._columns:
+ logging.getLogger('init').info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.' % (field_name, self._name))
+ self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
+ required=True, ondelete="cascade")
+ elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() != "cascade":
+ logging.getLogger('init').warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.' % (field_name, self._name))
+ self._columns[field_name].required = True
+ self._columns[field_name].ondelete = "cascade"
+
+ #def __getattr__(self, name):
+ # """
+ # Proxies attribute accesses to the `inherits` parent so we can call methods defined on the inherited parent
+ # (though inherits doesn't use Python inheritance).
+ # Handles translating between local ids and remote ids.
+ # Known issue: doesn't work correctly when using python's own super(), don't involve inherit-based inheritance
+ # when you have inherits.
+ # """
+ # for model, field in self._inherits.iteritems():
+ # proxy = self.pool.get(model)
+ # if hasattr(proxy, name):
+ # attribute = getattr(proxy, name)
+ # if not hasattr(attribute, '__call__'):
+ # return attribute
+ # break
+ # else:
+ # return super(orm, self).__getattr__(name)
+
+ # def _proxy(cr, uid, ids, *args, **kwargs):
+ # objects = self.browse(cr, uid, ids, kwargs.get('context', None))
+ # lst = [obj[field].id for obj in objects if obj[field]]
+ # return getattr(proxy, name)(cr, uid, lst, *args, **kwargs)
+
+ # return _proxy
+
+
def fields_get(self, cr, user, fields=None, context=None):
"""
Get the description of list of fields
"""
ira = self.pool.get('ir.model.access')
- read_access = ira.check(cr, user, self._name, 'write', raise_exception=False, context=context) or \
- ira.check(cr, user, self._name, 'create', raise_exception=False, context=context)
- return super(orm, self).fields_get(cr, user, fields, context, read_access)
+ write_access = ira.check(cr, user, self._name, 'write', raise_exception=False, context=context) or \
+ ira.check(cr, user, self._name, 'create', raise_exception=False, context=context)
+ return super(orm, self).fields_get(cr, user, fields, context, write_access)
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
"""
:param ids: id or list of the ids of the records to read
:param fields: optional list of field names to return (default: all fields would be returned)
:type fields: list (example ['field_name_1', ...])
- :param context(optional, highly recommended): context arguments, like lang, time zone
+ :param context: (optional) context arguments, like lang, time zone
:return: list of dictionaries((dictionary per record asked)) with requested field values
:rtype: [{‘name_of_the_field’: value, ...}, ...]
:raise AccessError: * if user has no read rights on the requested object
for r in result:
for key, v in r.items():
+ if v is None:
+ r[key] = False
if key in self._columns:
column = self._columns[key]
elif key in self._inherit_fields:
def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
if not context:
context = {}
- #ids = map(lambda x:int(x), ids)
if not ids:
return []
if fields_to_read == None:
fields_to_read = self._columns.keys()
- # construct a clause for the rules :
- d1, d2, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
+ # Construct a clause for the security rules.
+ # 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
+ # or will at least contain self._table.
+ rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
+
# all inherited fields + all non inherited fields for which the attribute whose name is in load is True
fields_pre = [f for f in fields_to_read if
f == self.CONCURRENCY_CHECK_FIELD
res = []
if len(fields_pre):
def convert_field(f):
+ f_qual = "%s.%s" % (self._table, f) # need fully-qualified references in case len(tables) > 1
if f in ('create_date', 'write_date'):
- return "date_trunc('second', %s) as %s" % (f, f)
+ return "date_trunc('second', %s) as %s" % (f_qual, f)
if f == self.CONCURRENCY_CHECK_FIELD:
if self._log_access:
return "COALESCE(%s.write_date, %s.create_date, now())::timestamp AS %s" % (self._table, self._table, f,)
return "now()::timestamp AS %s" % (f,)
if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
- return 'length("%s") as "%s"' % (f, f)
- return '"%s"' % (f,)
+ return 'length(%s) as "%s"' % (f_qual, f)
+ return f_qual
+
fields_pre2 = map(convert_field, fields_pre)
order_by = self._parent_order or self._order
+ select_fields = ','.join(fields_pre2 + [self._table + '.id'])
+ query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
+ if rule_clause:
+ query += " AND " + (' OR '.join(rule_clause))
+ query += " ORDER BY " + order_by
for sub_ids in cr.split_for_in_conditions(ids):
- if d1:
- cr.execute('SELECT %s FROM %s WHERE %s.id IN %%s AND %s ORDER BY %s' % \
- (','.join(fields_pre2 + [self._table + '.id']), ','.join(tables), self._table, ' and '.join(d1),
- order_by),[sub_ids,]+d2)
+ if rule_clause:
+ cr.execute(query, [tuple(sub_ids)] + rule_params)
if cr.rowcount != len(sub_ids):
raise except_orm(_('AccessError'),
_('You try to bypass an access rule while reading (Document type: %s).') % self._description)
else:
- cr.execute('SELECT %s FROM \"%s\" WHERE id IN %%s ORDER BY %s' %
- (','.join(fields_pre2 + ['id']), self._table,
- order_by), (sub_ids,))
+ cr.execute(query, (tuple(sub_ids),))
res.extend(cr.dictfetchall())
else:
res = map(lambda x: {'id': x}, ids)
if f == self.CONCURRENCY_CHECK_FIELD:
continue
if self._columns[f].translate:
- ids = map(lambda x: x['id'], res)
+ ids = [x['id'] for x in res]
#TODO: optimize out of this loop
res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context.get('lang', False) or 'en_US', ids)
for r in res:
for r in res:
for f in fields_post:
r[f] = self._columns[f]._symbol_get(r[f])
- ids = map(lambda x: x['id'], res)
+ ids = [x['id'] for x in res]
# all non inherited fields for which the attribute whose name is in load is False
fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)
record[f] = res2[record['id']]
else:
record[f] = []
-
-#for f in fields_post:
-# # get the value of that field for all records/ids
-# res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
-# for record in res:
-# record[f] = res2[record['id']]
-
readonly = None
for vals in res:
for field in vals.copy():
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
- cr.execute("select count(*) from res_groups_users_rel where gid in (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
+            cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
:param user: current user id
:param ids: id or list of ids
:param context: context arguments, like lang, time zone
- :param details: if True, *_uid fields are replaced with the name of the user
+ :param details: if True, \*_uid fields are replaced with the name of the user
:return: list of ownership dictionaries for each requested record
:rtype: list of dictionaries with the following keys:
if not ids:
return []
fields = ''
+        uniq = isinstance(ids, (int, long))
+        if uniq:
+            ids = [ids]
+        fields = 'id'
if self._log_access:
- fields = ', u.create_uid, u.create_date, u.write_uid, u.write_date'
- if isinstance(ids, (int, long)):
- ids_str = str(ids)
- else:
- ids_str = string.join(map(lambda x: str(x), ids), ',')
- cr.execute('select u.id'+fields+' from "'+self._table+'" u where u.id in ('+ids_str+')')
+ fields += ', create_uid, create_date, write_uid, write_date'
+ query = 'SELECT %s FROM "%s" WHERE id IN %%s' % (fields, self._table)
+ cr.execute(query, (tuple(ids),))
res = cr.dictfetchall()
for r in res:
for key in r:
if key in ('write_uid', 'create_uid', 'uid') and details:
if r[key]:
r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
- if isinstance(ids, (int, long)):
- return res[ids]
+ if uniq:
+            return res[0]
return res
def _check_concurrency(self, cr, ids, context):
:param cr: database cursor
:param uid: current user id
:param ids: id or list of ids
- :param context(optional, highly recommended): context arguments, like lang, time zone
+ :param context: (optional) context arguments, like lang, time zone
:return: True
:raise AccessError: * if user has no unlink rights on the requested object
* if user tries to bypass access rules for unlink on the requested object
properties = self.pool.get('ir.property')
domain = [('res_id', '=', False),
- ('value', 'in', ['%s,%s' % (self._name, i) for i in ids]),
+ ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
]
if properties.search(cr, uid, domain, context=context):
raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
for oid in ids:
wf_service.trg_delete(uid, self._name, oid, cr)
- #cr.execute('select * from '+self._table+' where id in ('+str_d+')', ids)
- #res = cr.dictfetchall()
- #for key in self._inherits:
- # ids2 = [x[self._inherits[key]] for x in res]
- # self.pool.get(key).unlink(cr, uid, ids2)
self.check_access_rule(cr, uid, ids, 'unlink', context=context)
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('delete from ' + self._table + ' ' \
- 'where id in %s', (sub_ids,))
+ 'where id IN %s', (sub_ids,))
for order, object, store_ids, fields in result_store:
if object != self._name:
obj = self.pool.get(object)
- cr.execute('select id from '+obj._table+' where id in ('+','.join(map(str, store_ids))+')')
+ cr.execute('select id from '+obj._table+' where id IN %s',(tuple(store_ids),))
rids = map(lambda x: x[0], cr.fetchall())
if rids:
obj._store_set_values(cr, uid, rids, fields, context)
:param cr: database cursor
:param user: current user id
- :type user: integer (example 1)
- :param ids: id or list of ids
- :param vals: dictionary of field values to update
- :type vals: dictionary (example {'field_name': 'value', ...})
- :param context(optional, highly recommended): context arguments, like lang, time zone
+ :type user: integer
+ :param ids: object id or list of object ids to update according to **vals**
+ :param vals: field values to update, e.g {'field_name': new_field_value, ...}
+ :type vals: dictionary
+ :param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
+ :type context: dictionary
:return: True
:raise AccessError: * if user has no write rights on the requested object
* if user tries to bypass access rules for write on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
- :raise UserError: if recurssion is found
+    :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
+
+ **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:
+
+ + For a many2many field, a list of tuples is expected.
+          Here is the list of tuples that are accepted, with the corresponding semantics ::
+
+ (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
+ (1, ID, { values }) update the linked record with id = ID (write *values* on it)
+ (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
+ (3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
+ (4, ID) link to existing record with id = ID (adds a relationship)
+ (5) unlink all (like using (3,ID) for all linked records)
+ (6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
+
+ Example:
+ [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
- vals format for relational field type.
+        + For a one2many field, a list of tuples is expected.
+          Here is the list of tuples that are accepted, with the corresponding semantics ::
- + many2many field : [(6, 0, list of ids)] (example: [(6, 0, [8, 5, 6, 4])])
- + one2many field : [(0, 0, dictionary of values)] (example: [(0, 0, {'field_name':field_value, ...})])
- + many2one field : ID of related record
- + reference field : model name, id (example: 'product.product, 5')
+ (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
+ (1, ID, { values }) update the linked record with id = ID (write *values* on it)
+ (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
+
+ Example:
+ [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
+
+ + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
+ + For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)
"""
readonly = None
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
- cr.execute("select count(*) from res_groups_users_rel where gid in (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
+            cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
ids = [ids]
self._check_concurrency(cr, ids, context)
-
self.pool.get('ir.model.access').check(cr, user, self._name, 'write', context=context)
+ result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
+
+ # No direct update of parent_left/right
+ vals.pop('parent_left', None)
+ vals.pop('parent_right', None)
+
+ parents_changed = []
+ if self._parent_store and (self._parent_name in vals):
+ # The parent_left/right computation may take up to
+ # 5 seconds. No need to recompute the values if the
+ # parent is the same. Get the current value of the parent
+ parent_val = vals[self._parent_name]
+ if parent_val:
+ query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL)" % \
+ (self._table, self._parent_name, self._parent_name)
+ cr.execute(query, (tuple(ids), parent_val))
+ else:
+ query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL)" % \
+ (self._table, self._parent_name)
+ cr.execute(query, (tuple(ids),))
+ parents_changed = map(operator.itemgetter(0), cr.fetchall())
upd0 = []
upd1 = []
self.check_access_rule(cr, user, ids, 'write', context=context)
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
- 'where id in %s', upd1 + [sub_ids])
+ 'where id IN %s', upd1 + [sub_ids])
if totranslate:
# TODO: optimize
if c[0].startswith('default_'):
del rel_context[c[0]]
- result = []
for field in upd_todo:
for id in ids:
result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
nids = []
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
- 'where id in %s', (sub_ids,))
+ 'where id IN %s', (sub_ids,))
nids.extend([x[0] for x in cr.fetchall()])
v = {}
self.pool.get(table).write(cr, user, nids, v, context)
self._validate(cr, user, ids, context)
-# TODO: use _order to set dest at the right position and not first node of parent
- if self._parent_store and (self._parent_name in vals):
+
+ # TODO: use _order to set dest at the right position and not first node of parent
+ # We can't defer parent_store computation because the stored function
+        # fields that are computed may refer (directly or indirectly) to
+ # parent_left/right (via a child_of domain)
+ if parents_changed:
if self.pool._init:
self.pool._init_parent[self._name]=True
else:
- for id in ids:
+ order = self._parent_order or self._order
+ parent_val = vals[self._parent_name]
+ if parent_val:
+ clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
+ else:
+ clause, params = '%s IS NULL' % (self._parent_name,), ()
+ cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, order), params)
+ parents = cr.fetchall()
+
+ for id in parents_changed:
+ cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
+ pleft, pright = cr.fetchone()
+ distance = pright - pleft + 1
+
# Find Position of the element
- if vals[self._parent_name]:
- cr.execute('select parent_left,parent_right,id from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (vals[self._parent_name],))
- else:
- cr.execute('select parent_left,parent_right,id from '+self._table+' where '+self._parent_name+' is null order by '+(self._parent_order or self._order))
- result_p = cr.fetchall()
position = None
- for (pleft,pright,pid) in result_p:
- if pid == id:
+ for (parent_pright, parent_id) in parents:
+ if parent_id == id:
break
- position = pright+1
+ position = parent_pright+1
- # It's the first node of the parent: position = parent_left+1
+ # It's the first node of the parent
if not position:
- if not vals[self._parent_name]:
+ if not parent_val:
position = 1
else:
- cr.execute('select parent_left from '+self._table+' where id=%s', (vals[self._parent_name],))
+ cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
position = cr.fetchone()[0]+1
- # We have the new position !
- cr.execute('select parent_left,parent_right from '+self._table+' where id=%s', (id,))
- pleft,pright = cr.fetchone()
- distance = pright - pleft + 1
-
- if position>pleft and position<=pright:
+ if pleft < position <= pright:
raise except_orm(_('UserError'), _('Recursivity Detected.'))
- if pleft<position:
+ if pleft < position:
cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft,position-pleft, pleft, pright))
cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance,pleft-position+distance, pleft+distance, pright+distance))
result += self._store_get_values(cr, user, ids, vals.keys(), context)
+ result.sort()
+
+ done = {}
for order, object, ids, fields in result:
- self.pool.get(object)._store_set_values(cr, user, ids, fields, context)
+ key = (object,tuple(fields))
+ done.setdefault(key, {})
+ # avoid to do several times the same computation
+ todo = []
+ for id in ids:
+ if id not in done[key]:
+ done[key][id] = True
+ todo.append(id)
+ self.pool.get(object)._store_set_values(cr, user, todo, fields, context)
wf_service = netsvc.LocalService("workflow")
for id in ids:
:param cr: database cursor
:param user: current user id
- :type user: integer (example 1)
- :param vals: dictionary for new record {'field_name': field_value, ...}
- :type vals: dictionary (example {'field_name': field_value, ...})
- :param context(optional, highly recommended): context arguments, like lang, time zone
- :type context: dictionary (example {'lang': 'en_us', ...})
+ :type user: integer
+ :param vals: field values for new record, e.g {'field_name': field_value, ...}
+ :type vals: dictionary
+ :param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
+ :type context: dictionary
:return: id of new record created
:raise AccessError: * if user has no create rights on the requested object
* if user tries to bypass access rules for create on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
+    :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
- vals format for relational field type.
-
- + many2many field : [(6, 0, list of ids)] (example: [(6, 0, [8, 5, 6, 4])])
- + one2many field : [(0, 0, dictionary of values)] (example: [(0, 0, {'field_name':field_value, ...})])
- + many2one field : ID of related record
- + reference field : model name, id (example: 'product.product, 5')
+ **Note**: The type of field values to pass in ``vals`` for relationship fields is specific.
+ Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how
+ to specify them.
"""
if not context:
context = {}
- self.pool.get('ir.model.access').check(cr, user, self._name, 'create')
+ self.pool.get('ir.model.access').check(cr, user, self._name, 'create', context=context)
default = []
if default_values[dv] and isinstance(default_values[dv][0], (int, long)):
default_values[dv] = [(6, 0, default_values[dv])]
- vals.update(default_values)
+ # override defaults with the provided values, never allow the other way around
+ default_values.update(vals)
+ vals = default_values
tocreate = {}
for v in self._inherits:
record_id = tocreate[table].pop('id', None)
- if record_id is None:
+ if record_id is None or not record_id:
record_id = self.pool.get(table).create(cr, user, tocreate[table], context=context)
else:
self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=context)
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
- cr.execute("select count(*) from res_groups_users_rel where gid in (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
+            cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
upd1 += ',%s,now()'
upd2.append(user)
cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2))
- d1, d2, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'create', context=context)
- if d1:
- d1 = ' AND '+' AND '.join(d1)
- cr.execute('SELECT '+self._table+'.id FROM '+','.join(tables)+' ' \
- 'WHERE '+self._table+'.id = ' +str(id_new)+d1,d2)
- if not cr.rowcount:
- raise except_orm(_('AccessError'),
- _('You try to bypass an access rule to create (Document type: %s).') \
- % self._name)
+ self.check_access_rule(cr, user, [id_new], 'create', context=context)
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
- if self._parent_store:
+ if self._parent_store and not context.get('defer_parent_store_computation'):
if self.pool._init:
self.pool._init_parent[self._name]=True
else:
self.pool.get(object)._store_set_values(cr, user, ids, fields2, context)
done.append((object, ids, fields2))
+ if self._log_create and not (context and context.get('no_store_function', False)):
+ message = self._description + \
+ " '" + \
+ self.name_get(cr, user, [id_new], context=context)[0][1] + \
+ "' "+ _("created.")
+ self.log(cr, user, id_new, message, True, context=context)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_create(user, self._name, id_new, cr)
return id_new
return result2
def _store_set_values(self, cr, uid, ids, fields, context):
+ if not ids:
+ return True
field_flag = False
field_dict = {}
if self._log_access:
- cr.execute('select id,write_date from '+self._table+' where id in ('+','.join(map(str, ids))+')')
+ cr.execute('select id,write_date from '+self._table+' where id IN %s',(tuple(ids),))
res = cr.fetchall()
for r in res:
if r[1]:
# TODO: Validate
#
def perm_write(self, cr, user, ids, fields, context=None):
- raise _('This method does not exist anymore')
+ raise NotImplementedError(_('This method does not exist anymore'))
# TODO: ameliorer avec NULL
def _where_calc(self, cr, user, args, active_test=True, context=None):
+ """Computes the WHERE clause needed to implement an OpenERP domain.
+ :param args: the domain to compute
+ :type args: list
+ :param active_test: whether the default filtering of records with ``active``
+ field set to ``False`` should be applied.
+ :return: tuple with 3 elements: (where_clause, where_clause_params, tables) where
+ ``where_clause`` contains a list of where clause elements (to be joined with 'AND'),
+ ``where_clause_params`` is a list of parameters to be passed to the db layer
+ for the where_clause expansion, and ``tables`` is the list of double-quoted
+ table names that need to be included in the FROM clause.
+ :rtype: tuple
+ """
if not context:
context = {}
args = args[:]
raise except_orm(_('AccessError'), _('Bad query.'))
return True
+ def _apply_ir_rules(self, cr, uid, where_clause, where_clause_params, tables, mode='read', model_name=None, context=None):
+ """Add what's missing in ``where_clause``, ``where_params``, ``tables`` to implement
+ all appropriate ir.rules (on the current object but also from it's _inherits parents)
+
+ :param where_clause: list with current elements of the WHERE clause (strings)
+ :param where_clause_params: list with parameters for ``where_clause``
+ :param tables: list with double-quoted names of the tables that are joined
+ in ``where_clause``
+ :param model_name: optional name of the model whose ir.rules should be applied (default:``self._name``)
+ This could be useful for inheritance for example, but there is no provision to include
+ the appropriate JOIN for linking the current model to the one referenced in model_name.
+ :return: True if additional clauses where applied.
+ """
+ added_clause, added_params, added_tables = self.pool.get('ir.rule').domain_get(cr, uid, model_name or self._name, mode, context=context)
+ if added_clause:
+ where_clause += added_clause
+ where_clause_params += added_params
+ for table in added_tables:
+ if table not in tables:
+ tables.append(table)
+ return True
+ return False
+
def search(self, cr, user, args, offset=0, limit=None, order=None,
context=None, count=False):
"""
- Search for record/s with or without domain
+ Search for record/s based on a search domain.
:param cr: database cursor
:param user: current user id
- :param args: list of tuples specifying search criteria [('field_name', 'operator', 'value'), ...]
- :param offset: optional number from search starts
- :param limit: optional max number of records to return
+ :param args: list of tuples specifying the search domain [('field_name', 'operator', value), ...]. Pass an empty list to match all records.
+ :param offset: optional number of results to skip in the returned values (default: 0)
+ :param limit: optional max number of records to return (default: **None**)
:param order: optional columns to sort by (default: self._order=id )
- :param context(optional, highly recommended): context arguments, like lang, time zone
- :param count: if True, returns only the number of records matching the criteria, not their ids
+ :param context: optional context arguments, like lang, time zone
+ :type context: dictionary
+ :param count: optional (default: **False**), if **True**, returns only the number of records matching the criteria, not their ids
:return: id or list of ids of records matching the criteria
:rtype: integer or list of integers
:raise AccessError: * if user tries to bypass access rules for read on the requested object.
- Operators:
- * =, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right
- Prefix operators:
- * '&' (default), '|', '!'
+ **Expressing a search domain (args)**
+
+ Each tuple in the search domain needs to have 3 elements, in the form: **('field_name', 'operator', value)**, where:
+
+ * **field_name** must be a valid name of field of the object model, possibly following many-to-one relationships using dot-notation, e.g 'street' or 'partner_id.country' are valid values.
+ * **operator** must be a string with a valid comparison operator from this list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right``
+ The semantics of most of these operators are obvious.
+ The ``child_of`` operator will look for records who are children or grand-children of a given record,
+          according to the semantics of this model (i.e. following the relationship field named by
+          ``self._parent_name``, by default ``parent_id``).
+ * **value** must be a valid value to compare with the values of **field_name**, depending on its type.
+
+ Domain criteria can be combined using 3 logical operators than can be added between tuples: '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT).
+ These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2, while the arity of the '**!**' is just 1.
+ Be very careful about this when you combine them the first time.
+
+ Here is an example of searching for Partners named *ABC* from Belgium and Germany whose language is not english ::
+
+            [('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')]
+
+ The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is::
+
+ (name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))
"""
- if not context:
+ if context is None:
context = {}
+ self.pool.get('ir.model.access').check(cr, user, self._name, 'read', context=context)
# compute the where, order by, limit and offset clauses
- (qu1, qu2, tables) = self._where_calc(cr, user, args, context=context)
- dom = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
- qu1 = qu1 + dom[0]
- qu2 = qu2 + dom[1]
- for t in dom[2]:
- if t not in tables:
- tables.append(t)
-
- if len(qu1):
- qu1 = ' where '+string.join(qu1, ' and ')
- else:
- qu1 = ''
+ (where_clause, where_clause_params, tables) = self._where_calc(cr, user, args, context=context)
+
+ # apply direct ir.rules from current model
+ self._apply_ir_rules(cr, user, where_clause, where_clause_params, tables, 'read', context=context)
+ # then apply the ir.rules from the parents (through _inherits), adding the appropriate JOINs if needed
+ for inherited_model in self._inherits:
+ previous_tables = list(tables)
+ if self._apply_ir_rules(cr, user, where_clause, where_clause_params, tables, 'read', model_name=inherited_model, context=context):
+ # if some rules were applied, need to add the missing JOIN for them to make sense, passing the previous
+ # list of table in case the inherited table was not in the list before (as that means the corresponding
+ # JOIN(s) was(were) not present)
+ self._inherits_join_add(inherited_model, previous_tables, where_clause)
+ tables = list(set(tables).union(set(previous_tables)))
+
+ where = where_clause
order_by = self._order
if order:
self._check_qorder(order)
o = order.split(' ')[0]
- if (o in self._columns) and getattr(self._columns[o], '_classic_write'):
- order_by = order
+ if (o in self._columns):
+ # we can only do efficient sort if the fields is stored in database
+ if getattr(self._columns[o], '_classic_read'):
+ order_by = order
+ elif (o in self._inherit_fields):
+ parent_obj = self.pool.get(self._inherit_fields[o][0])
+ if getattr(parent_obj._columns[o], '_classic_read'):
+ # Allowing inherits'ed field for server side sorting, if they can be sorted by the dbms
+ inherited_tables, inherit_join, order_by = self._inherits_join_calc(o, tables, where_clause)
limit_str = limit and ' limit %d' % limit or ''
offset_str = offset and ' offset %d' % offset or ''
-
+
+ if where:
+ where_str = " WHERE %s" % " AND ".join(where)
+ else:
+ where_str = ""
if count:
cr.execute('select count(%s.id) from ' % self._table +
- ','.join(tables) +qu1 + limit_str + offset_str, qu2)
+ ','.join(tables) + where_str + limit_str + offset_str, where_clause_params)
res = cr.fetchall()
return res[0][0]
- cr.execute('select %s.id from ' % self._table + ','.join(tables) +qu1+' order by '+order_by+limit_str+offset_str, qu2)
+ cr.execute('select %s.id from ' % self._table + ','.join(tables) + where_str +' order by '+order_by+limit_str+offset_str, where_clause_params)
res = cr.fetchall()
return [x[0] for x in res]
:param cr: database cursor
:param user: current user id
- :type user: integer (example 1)
+ :type user: integer
:param ids: list of ids
:param context: context arguments, like lang, time zone
+ :type context: dictionary
:return: tuples with the text representation of requested objects for to-many relationships
"""
return [(r['id'], tools.ustr(r[self._rec_name])) for r in self.read(cr, user, ids,
[self._rec_name], context, load='_classic_write')]
+ # private implementation of name_search, allows passing a dedicated user for the name_get part to
+ # solve some access rights issues
+ def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
+ if args is None:
+ args = []
+ if context is None:
+ context = {}
+ args = args[:]
+ if name:
+ args += [(self._rec_name, operator, name)]
+ ids = self.search(cr, user, args, limit=limit, context=context)
+ res = self.name_get(cr, name_get_uid or user, ids, context)
+ return res
+
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
"""
:param args: list of tuples specifying search criteria [('field_name', 'operator', 'value'), ...]
:param operator: operator for search criterion
:param context: context arguments, like lang, time zone
+ :type context: dictionary
:param limit: optional max number of records to return
:return: list of object names matching the search criteria, used to provide completion for to-many relationships
- This method is equivalent of search() on name + name_get()
+ This method is equivalent of :py:meth:`~osv.osv.osv.search` on **name** + :py:meth:`~osv.osv.osv.name_get` on the result.
+ See :py:meth:`~osv.osv.osv.search` for an explanation of the possible values for the search domain specified in **args**.
"""
- if not args:
- args = []
- if not context:
- context = {}
- args = args[:]
- if name:
- args += [(self._rec_name, operator, name)]
- ids = self.search(cr, user, args, limit=limit, context=context)
- res = self.name_get(cr, user, ids, context)
- return res
+ return self._name_search(cr, user, name, args, operator, context, limit)
def copy_data(self, cr, uid, id, default=None, context=None):
"""
:param cr: database cursor
:param user: current user id
- :param ids: id of the record to copy
- :param default: dictionary of field values to update before saving the duplicate object
+ :param id: id of the record to copy
+ :param default: field values to override in the original values of the copied record
+ :type default: dictionary
:param context: context arguments, like lang, time zone
+ :type context: dictionary
:return: dictionary containing all the field values
"""
- if not context:
+ if context is None:
context = {}
- if not default:
+ if default is None:
default = {}
if 'state' not in default:
if 'state' in self._defaults:
else:
default['state'] = self._defaults['state']
- data = self.read(cr, uid, [id], context=context)[0]
+        context_wo_lang = context.copy()
+ if 'lang' in context:
+ del context_wo_lang['lang']
+ data = self.read(cr, uid, [id], context=context_wo_lang)[0]
+
fields = self.fields_get(cr, uid, context=context)
- trans_data=[]
for f in fields:
ftype = fields[f]['type']
elif ftype in ('one2many', 'one2one'):
res = []
rel = self.pool.get(fields[f]['relation'])
- if data[f] != False:
+ if data[f]:
+ # duplicate following the order of the ids
+ # because we'll rely on it later for copying
+ # translations in copy_translation()!
+ data[f].sort()
for rel_id in data[f]:
# the lines are first duplicated using the wrong (old)
# parent but then are reassigned to the correct one thanks
- # to the (4, ...)
- d,t = rel.copy_data(cr, uid, rel_id, context=context)
+ # to the (0, 0, ...)
+ d = rel.copy_data(cr, uid, rel_id, context=context)
res.append((0, 0, d))
- trans_data += t
data[f] = res
elif ftype == 'many2many':
data[f] = [(6, 0, data[f])]
+ del data['id']
+
+ # make sure we don't break the current parent_store structure and
+ # force a clean recompute!
+ for parent_column in ['parent_left', 'parent_right']:
+ data.pop(parent_column, None)
+
+ for v in self._inherits:
+ del data[self._inherits[v]]
+ return data
+
+ def copy_translations(self, cr, uid, old_id, new_id, context=None):
trans_obj = self.pool.get('ir.translation')
- #TODO: optimize translations
- trans_name=''
- for f in fields:
- trans_flag=True
- if f in self._columns and self._columns[f].translate:
- trans_name=self._name+","+f
- elif f in self._inherit_fields and self._inherit_fields[f][2].translate:
- trans_name=self._inherit_fields[f][0]+","+f
- else:
- trans_flag=False
+ fields = self.fields_get(cr, uid, context=context)
- if trans_flag:
- trans_ids = trans_obj.search(cr, uid, [
- ('name', '=', trans_name),
- ('res_id','=',data['id'])
+ translation_records = []
+ for field_name, field_def in fields.items():
+ # we must recursively copy the translations for o2o and o2m
+ if field_def['type'] in ('one2one', 'one2many'):
+ target_obj = self.pool.get(field_def['relation'])
+ old_record, new_record = self.read(cr, uid, [old_id, new_id], [field_name], context=context)
+ # here we rely on the order of the ids to match the translations
+ # as foreseen in copy_data()
+ old_childs = sorted(old_record[field_name])
+ new_childs = sorted(new_record[field_name])
+ for (old_child, new_child) in zip(old_childs, new_childs):
+ # recursive copy of translations here
+ target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
+ # and for translatable fields we keep them for copy
+ elif field_def.get('translate'):
+ trans_name = ''
+ if field_name in self._columns:
+ trans_name = self._name + "," + field_name
+ elif field_name in self._inherit_fields:
+ trans_name = self._inherit_fields[field_name][0] + "," + field_name
+ if trans_name:
+ trans_ids = trans_obj.search(cr, uid, [
+ ('name', '=', trans_name),
+ ('res_id','=', old_id)
])
+ translation_records.extend(trans_obj.read(cr,uid,trans_ids,context=context))
- trans_data.extend(trans_obj.read(cr,uid,trans_ids,context=context))
-
- del data['id']
+ for record in translation_records:
+ del record['id']
+ record['res_id'] = new_id
+ trans_obj.create(cr, uid, record, context=context)
- for v in self._inherits:
- del data[self._inherits[v]]
- return data, trans_data
def copy(self, cr, uid, id, default=None, context=None):
"""
:param cr: database cursor
:param uid: current user id
:param id: id of the record to copy
- :param default: dictionary of field values to update before saving the duplicate object
- :type default: dictionary (example {'field_name': field_value, ...})
+ :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
+ :type default: dictionary
:param context: context arguments, like lang, time zone
+ :type context: dictionary
:return: True
"""
- trans_obj = self.pool.get('ir.translation')
- data, trans_data = self.copy_data(cr, uid, id, default, context)
+ data = self.copy_data(cr, uid, id, default, context)
new_id = self.create(cr, uid, data, context)
- for record in trans_data:
- del record['id']
- record['res_id'] = new_id
- trans_obj.create(cr, uid, record, context)
+ self.copy_translations(cr, uid, id, new_id, context)
return new_id
- def exists(self, cr, uid, id, context=None):
- cr.execute('SELECT count(1) FROM "%s" where id=%%s' % (self._table,), (id,))
- return bool(cr.fetchone()[0])
+ def exists(self, cr, uid, ids, context=None):
+ if type(ids) in (int,long):
+ ids = [ids]
+        query = 'SELECT count(1) FROM "%s"' % (self._table)
+        cr.execute(query + " WHERE id IN %s", (tuple(ids),))
+ return cr.fetchone()[0] == len(ids)
def check_recursion(self, cr, uid, ids, parent=None):
"""
- Check recursion in records
+ Verifies that there is no loop in a hierarchical structure of records,
+ by following the parent relationship using the **parent** field until a loop
+ is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
- :param ids: list of ids of records
- :param parent: parent field name
- :return: True or False based on recursion detection
+ :param ids: list of ids of records to check
+ :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
+ :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
if not parent:
parent = self._parent_name
ids_parent = ids[:]
- while len(ids_parent):
+ query = 'SELECT distinct "%s" FROM "%s" WHERE id IN %%s' % (parent, self._table)
+ while ids_parent:
ids_parent2 = []
for i in range(0, len(ids), cr.IN_MAX):
sub_ids_parent = ids_parent[i:i+cr.IN_MAX]
- cr.execute('SELECT distinct "'+parent+'"'+
- ' FROM "'+self._table+'" ' \
- 'WHERE id = ANY(%s)',(sub_ids_parent,))
+ cr.execute(query, (tuple(sub_ids_parent),))
ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
ids_parent = ids_parent2
for i in ids_parent:
return False
return True
+ def get_xml_id(self, cr, uid, ids, *args, **kwargs):
+ """Find out the XML ID of any database record, if there
+ is one. This method works as a possible implementation
+ for a function field, to be able to add it to any
+ model object easily, referencing it as ``osv.osv.get_xml_id``.
+
+ **Synopsis**: ``get_xml_id(cr, uid, ids) -> { 'id': 'module.xml_id' }``
+
+ :return: the fully qualified XML ID of the given object,
+ defaulting to an empty string when there's none
+ (to be usable as a function field).
+ """
+ result = dict.fromkeys(ids, '')
+ model_data_obj = self.pool.get('ir.model.data')
+ data_ids = model_data_obj.search(cr,uid,
+ [('model','=',self._name),('res_id','in',ids)])
+ data_results = model_data_obj.read(cr,uid,data_ids,
+ ['name','module','res_id'])
+ for record in data_results:
+ result[record['res_id']] = '%(module)s.%(name)s' % record
+ return result
+
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: