##############################################################################
#
# OpenERP, Open Source Management Solution
-# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
+# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
#
import time
+import calendar
import types
import string
import netsvc
regex_order = re.compile('^([a-zA-Z0-9_]+( desc)?( asc)?,?)+$', re.I)
def last_day_of_current_month():
    """Return the last day of the current month as a 'YYYY-MM-DD' string.

    Uses calendar.monthrange to find the number of days in the current
    month, then formats the resulting date directly instead of splicing
    the day number into a strftime format string.
    """
    import datetime
    import calendar
    today = datetime.date.today()
    # monthrange returns (weekday_of_first_day, number_of_days)
    last_day = calendar.monthrange(today.year, today.month)[1]
    return today.replace(day=last_day).strftime('%Y-%m-%d')
def intersect(la, lb):
    """Return the elements of la that also appear in lb, preserving order."""
    return filter(lb.__contains__, la)
self.id = False
def __getitem__(self, name):
- return False
+ return None
def __getattr__(self, name):
- return False # XXX: return self ?
+ return None # XXX: return self ?
def __int__(self):
    # A null record coerces to a false-ish value in numeric context.
    return False
def __nonzero__(self):
    # A null record is always falsy, so `if record:` can detect it (Python 2 truth hook).
    return False
+
+ def __unicode__(self):
+ return u''
#
def __init__(self, cr, uid, id, table, cache, context=None, list_class = None, fields_process={}):
'''
table : the object (inherited from orm)
- context : a dictionnary with an optionnal context
+ context : a dictionary with an optional context
'''
if not context:
context = {}
cache.setdefault(table._name, {})
self._data = cache[table._name]
- if not id in self._data:
+ if id not in self._data:
self._data[id] = {'id': id}
self._cache = cache
- pass
def __getitem__(self, name):
if name == 'id':
return self._id
- if not name in self._data[self._id]:
+ if name not in self._data[self._id]:
# build the list of fields we will fetch
# fetch the definition of the field which was asked for
col = self._table._columns[name]
elif name in self._table._inherit_fields:
col = self._table._inherit_fields[name][2]
- elif hasattr(self._table, name):
+ elif hasattr(self._table, str(name)):
if isinstance(getattr(self._table, name), (types.MethodType, types.LambdaType, types.FunctionType)):
return lambda *args, **argv: getattr(self._table, name)(self._cr, self._uid, [self._id], *args, **argv)
else:
else:
logger = netsvc.Logger()
logger.notifyChannel('orm', netsvc.LOG_ERROR, "Programming error: field '%s' does not exist in object '%s' !" % (name, self._table._name))
- return False
+ return None
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
if col._classic_write:
# otherwise we fetch only that field
else:
ffields = [(name, col)]
- ids = filter(lambda id: not name in self._data[id], self._data.keys())
+ ids = filter(lambda id: name not in self._data[id], self._data.keys())
# read the data
fffields = map(lambda x: x[0], ffields)
datas = self._table.read(self._cr, self._uid, ids, fffields, context=self._context, load="_classic_write")
if f._type in self._fields_process:
for d in datas:
d[n] = self._fields_process[f._type](d[n])
- d[n].set_value(d[n], self, f)
+ if d[n]:
+ d[n].set_value(self._cr, self._uid, d[n], self, f)
+ if not datas:
+ # Where did those ids come from? Perhaps old entries in ir_model_data?
+ raise except_orm('NoDataError', 'Field %s in %s%s'%(name,self._table_name,str(ids)))
# create browse records for 'remote' objects
for data in datas:
for n, f in ffields:
elif f._type in ('one2many', 'many2many') and len(data[n]):
data[n] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(f._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in data[n]], self._context)
self._data[data['id']].update(data)
+ if not name in self._data[self._id]:
+ #how did this happen?
+ logger = netsvc.Logger()
+ logger.notifyChannel("browse_record", netsvc.LOG_ERROR,"Ffields: %s, datas: %s"%(str(fffields),str(datas)))
+ logger.notifyChannel("browse_record", netsvc.LOG_ERROR,"Data: %s, Table: %s"%(str(self._data[self._id]),str(self._table)))
+ raise AttributeError(_('Unknown attribute %s in %s ') % (str(name),self._table_name))
return self._data[self._id][name]
def __getattr__(self, name):
f_type = ('float8', 'DOUBLE PRECISION')
elif isinstance(f, fields.function) and f._type == 'selection':
f_type = ('text', 'text')
+ elif isinstance(f, fields.function) and f._type == 'char':
+ f_type = ('varchar', 'VARCHAR(%d)' % (f.size))
else:
logger = netsvc.Logger()
logger.notifyChannel("init", netsvc.LOG_WARNING, '%s type not supported!' % (type(f)))
_rec_name = 'name'
_parent_name = 'parent_id'
_parent_store = False
+ _parent_order = False
_date_name = 'date'
_order = 'id'
_sequence = None
_inherits = {}
_table = None
_invalids = set()
+
+ CONCURRENCY_CHECK_FIELD = '__last_update'
def _field_create(self, cr, context={}):
- cr.execute("SELECT id FROM ir_model WHERE model='%s'" % self._name)
+ cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
if not cr.rowcount:
cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
model_id = cr.fetchone()[0]
- cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s,%s)", (model_id, self._name, self._description, self.__doc__, 'base'))
- if 'module' in context:
- cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
- ('model_'+self._name.replace('.','_'), context['module'], 'ir.model', model_id)
- )
+ cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
else:
model_id = cr.fetchone()[0]
+ if 'module' in context:
+ name_id = 'model_'+self._name.replace('.','_')
+ cr.execute('select * from ir_model_data where name=%s and res_id=%s', (name_id,model_id))
+ if not cr.rowcount:
+ cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
+ (name_id, context['module'], 'ir.model', model_id)
+ )
+
cr.commit()
cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
id, model_id, model, name, field_description, ttype,
relation,view_load,state,select_level
) VALUES (
- %d,%s,%s,%s,%s,%s,%s,%s,%s,%s
+ %s,%s,%s,%s,%s,%s,%s,%s,%s,%s
)""", (
id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
vals['relation'], bool(vals['view_load']), 'base',
break
i += 1
if i == len(f):
- data[fpos] = str(r or '')
+ data[fpos] = tools.ustr(r or '')
return [data] + lines
def export_data(self, cr, uid, ids, fields, context=None):
datas += self.__export_row(cr, uid, row, fields, context)
return datas
- def import_data(self, cr, uid, fields, datas, mode='init',
- current_module=None, noupdate=False, context=None, filename=None):
+ def import_data(self, cr, uid, fields, datas, mode='init', current_module=None, noupdate=False, context=None, filename=None):
if not context:
context = {}
fields = map(lambda x: x.split('/'), fields)
if line[i]:
if fields_def[field[len(prefix)][:-3]]['type']=='many2many':
res_id = []
- for word in line[i].split(','):
+ for word in line[i].split(config.get('csv_internal_sep')):
if '.' in word:
module, xml_id = word.rsplit('.', 1)
else:
module, xml_id = current_module, line[i]
ir_model_data_obj = self.pool.get('ir.model.data')
id = ir_model_data_obj._get_id(cr, uid, module, xml_id)
- res_id = ir_model_data_obj.read(cr, uid, [id],
- ['res_id'])[0]['res_id']
+ res_res_id = ir_model_data_obj.read(cr, uid, [id],
+ ['res_id'])
+ if res_res_id:
+ res_id = res_res_id[0]['res_id']
row[field[0][:-3]] = res_id or False
continue
if (len(field) == len(prefix)+1) and \
res = []
if line[i]:
relation = fields_def[field[len(prefix)]]['relation']
- for word in line[i].split(','):
+ for word in line[i].split(config.get('csv_internal_sep')):
res2 = self.pool.get(relation).name_search(cr,
uid, word, [], operator='=')
res3 = (res2 and res2[0][0]) or False
if not fun(self, cr, uid, ids):
translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, source=msg) or msg
error_msgs.append(
- _("Error occured while validating the field(s) %s: %s") % (','.join(fields), translated_msg)
+ _("Error occurred while validating the field(s) %s: %s") % (','.join(fields), translated_msg)
)
self._invalids.update(fields)
if error_msgs:
translation_obj = self.pool.get('ir.translation')
model_access_obj = self.pool.get('ir.model.access')
for parent in self._inherits:
- res.update(self.pool.get(parent).fields_get(cr, user, fields,
- context))
- for f in self._columns.keys():
- if fields and f not in fields:
- continue
- res[f] = {'type': self._columns[f]._type}
- for arg in ('string', 'readonly', 'states', 'size', 'required',
- 'change_default', 'translate', 'help', 'select'):
- if getattr(self._columns[f], arg):
- res[f][arg] = getattr(self._columns[f], arg)
- if not read_access:
- res[f]['readonly'] = True
- res[f]['states'] = {}
- for arg in ('digits', 'invisible','filters'):
- if hasattr(self._columns[f], arg) \
- and getattr(self._columns[f], arg):
- res[f][arg] = getattr(self._columns[f], arg)
-
- # translate the field label
- res_trans = translation_obj._get_source(cr, user,
- self._name + ',' + f, 'field', context.get('lang', False) or 'en_US')
- if res_trans:
- res[f]['string'] = res_trans
- help_trans = translation_obj._get_source(cr, user,
- self._name + ',' + f, 'help', context.get('lang', False) or 'en_US')
- if help_trans:
- res[f]['help'] = help_trans
-
- if hasattr(self._columns[f], 'selection'):
- if isinstance(self._columns[f].selection, (tuple, list)):
- sel = self._columns[f].selection
- # translate each selection option
- sel2 = []
- for (key, val) in sel:
- val2 = None
- if val:
- val2 = translation_obj._get_source(cr, user,
- self._name + ',' + f, 'selection',
- context.get('lang', False) or 'en_US', val)
- sel2.append((key, val2 or val))
- sel = sel2
- res[f]['selection'] = sel
- else:
- # call the 'dynamic selection' function
- res[f]['selection'] = self._columns[f].selection(self, cr,
- user, context)
- if res[f]['type'] in ('one2many', 'many2many',
- 'many2one', 'one2one'):
- res[f]['relation'] = self._columns[f]._obj
- res[f]['domain'] = self._columns[f]._domain
- res[f]['context'] = self._columns[f]._context
-
+ res.update(self.pool.get(parent).fields_get(cr, user, fields, context))
+
+ if self._columns.keys():
+ for f in self._columns.keys():
+ if fields and f not in fields:
+ continue
+ res[f] = {'type': self._columns[f]._type}
+ for arg in ('string', 'readonly', 'states', 'size', 'required',
+ 'change_default', 'translate', 'help', 'select'):
+ if getattr(self._columns[f], arg):
+ res[f][arg] = getattr(self._columns[f], arg)
+ if not read_access:
+ res[f]['readonly'] = True
+ res[f]['states'] = {}
+ for arg in ('digits', 'invisible','filters'):
+ if hasattr(self._columns[f], arg) \
+ and getattr(self._columns[f], arg):
+ res[f][arg] = getattr(self._columns[f], arg)
+
+ res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context.get('lang', False) or 'en_US')
+ if res_trans:
+ res[f]['string'] = res_trans
+ help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context.get('lang', False) or 'en_US')
+ if help_trans:
+ res[f]['help'] = help_trans
+
+ if hasattr(self._columns[f], 'selection'):
+ if isinstance(self._columns[f].selection, (tuple, list)):
+ sel = self._columns[f].selection
+ # translate each selection option
+ sel2 = []
+ for (key, val) in sel:
+ val2 = None
+ if val:
+ val2 = translation_obj._get_source(cr, user, self._name + ',' + f, 'selection', context.get('lang', False) or 'en_US', val)
+ sel2.append((key, val2 or val))
+ sel = sel2
+ res[f]['selection'] = sel
+ else:
+ # call the 'dynamic selection' function
+ res[f]['selection'] = self._columns[f].selection(self, cr,
+ user, context)
+ if res[f]['type'] in ('one2many', 'many2many', 'many2one', 'one2one'):
+ res[f]['relation'] = self._columns[f]._obj
+ res[f]['domain'] = self._columns[f]._domain
+ res[f]['context'] = self._columns[f]._context
+ else:
+ #TODO : read the fields from the database
+ pass
+
if fields:
# filter out fields which aren't in the fields list
for r in res.keys():
def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
return False
- def __view_look_dom(self, cr, user, node, context=None):
+ def __view_look_dom(self, cr, user, node, view_id, context=None):
if not context:
context = {}
result = False
node.removeChild(f)
ctx = context.copy()
ctx['base_model_name'] = self._name
- xarch, xfields = self.pool.get(relation).__view_look_dom_arch(cr, user, f, ctx)
+ xarch, xfields = self.pool.get(relation).__view_look_dom_arch(cr, user, f, view_id, ctx)
views[str(f.localName)] = {
'arch': xarch,
'fields': xfields
elif node.nodeType==node.ELEMENT_NODE and node.localName in ('form', 'tree'):
result = self.view_header_get(cr, user, False, node.localName, context)
if result:
- node.setAttribute('string', result.decode('utf-8'))
+ node.setAttribute('string', result)
elif node.nodeType==node.ELEMENT_NODE and node.localName == 'calendar':
for additional_field in ('date_start', 'date_delay', 'date_stop', 'color'):
# translate view
if ('lang' in context) and not result:
if node.hasAttribute('string') and node.getAttribute('string'):
- trans = tools.translate(cr, self._name, 'view', context['lang'], node.getAttribute('string').encode('utf8'))
+ trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.getAttribute('string').encode('utf8'))
if not trans and ('base_model_name' in context):
- trans = tools.translate(cr, context['base_model_name'], 'view', context['lang'], node.getAttribute('string').encode('utf8'))
+ trans = self.pool.get('ir.translation')._get_source(cr, user, context['base_model_name'], 'view', context['lang'], node.getAttribute('string').encode('utf8'))
if trans:
- node.setAttribute('string', trans.decode('utf8'))
+ node.setAttribute('string', trans)
if node.hasAttribute('sum') and node.getAttribute('sum'):
- trans = tools.translate(cr, self._name, 'view', context['lang'], node.getAttribute('sum').encode('utf8'))
+ trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.getAttribute('sum').encode('utf8'))
if trans:
- node.setAttribute('sum', trans.decode('utf8'))
+ node.setAttribute('sum', trans)
if childs:
for f in node.childNodes:
- fields.update(self.__view_look_dom(cr, user, f, context))
- return fields
-
- def __view_look_dom_arch(self, cr, user, node, context=None):
- fields_def = self.__view_look_dom(cr, user, node, context=context)
-
- buttons = xpath.Evaluate('//button', node)
- if buttons:
- for button in buttons:
- if button.getAttribute('type') == 'object':
- continue
+ fields.update(self.__view_look_dom(cr, user, f, view_id, context))
- ok = True
+ return fields
- if user != 1: # admin user has all roles
- serv = netsvc.LocalService('object_proxy')
- user_roles = serv.execute_cr(cr, user, 'res.users', 'read', [user], ['roles_id'])[0]['roles_id']
- cr.execute("select role_id from wkf_transition where signal='%s'" % button.getAttribute('name'))
- roles = cr.fetchall()
- for role in roles:
- if role[0]:
- ok = ok and serv.execute_cr(cr, user, 'res.roles', 'check', user_roles, role[0])
-
- if not ok:
- button.setAttribute('readonly', '1')
- else:
- button.setAttribute('readonly', '0')
+ def __view_look_dom_arch(self, cr, user, node, view_id, context=None):
+ fields_def = self.__view_look_dom(cr, user, node, view_id, context=context)
+
+ rolesobj = self.pool.get('res.roles')
+ usersobj = self.pool.get('res.users')
+
+ buttons = xpath.Evaluate("//button[@type != 'object']", node)
+ for button in buttons:
+ ok = True
+ if user != 1: # admin user has all roles
+ user_roles = usersobj.read(cr, user, [user], ['roles_id'])[0]['roles_id']
+ cr.execute("select role_id from wkf_transition where signal=%s", (button.getAttribute('name'),))
+ roles = cr.fetchall()
+ for role in roles:
+ if role[0]:
+ ok = ok and rolesobj.check(cr, user, user_roles, role[0])
+
+ if not ok:
+ button.setAttribute('readonly', '1')
+ else:
+ button.setAttribute('readonly', '0')
arch = node.toxml(encoding="utf-8").replace('\t', '')
fields = self.fields_get(cr, user, fields_def.keys(), context)
for field in fields_def:
- fields[field].update(fields_def[field])
+ if field in fields:
+ fields[field].update(fields_def[field])
+ else:
+ cr.execute('select name, model from ir_ui_view where (id=%s or inherit_id=%s) and arch like %s', (view_id, view_id, '%%%s%%' % field))
+ res = cr.fetchall()[:]
+ model = res[0][1]
+ res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None))
+ msg = "\n * ".join([r[0] for r in res])
+ msg += "\n\nEither you wrongly customised this view, or some modules bringing those views are not compatible with your current data model"
+ netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
+ raise except_orm('View error', msg)
+
return arch, fields
def __get_default_calendar_view(self):
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False):
if not context:
context = {}
+
+ def encode(s):
+ if isinstance(s, unicode):
+ return s.encode('utf8')
+ return s
+
def _inherit_apply(src, inherit):
def _find(node, node2):
if node2.nodeType == node2.ELEMENT_NODE and node2.localName == 'xpath':
if res:
return res
return None
+
- doc_src = dom.minidom.parseString(src)
- doc_dest = dom.minidom.parseString(inherit)
+ doc_src = dom.minidom.parseString(encode(src))
+ doc_dest = dom.minidom.parseString(encode(inherit))
toparse = doc_dest.childNodes
while len(toparse):
node2 = toparse.pop(0)
if attr != 'position'
])
tag = "<%s%s>" % (node2.localName, attrs)
- raise AttributeError(_("Couldn't find tag '%s' in parent view !") % tag)
+ raise AttributeError(_("Couldn't find tag '%s' in parent view !\n%s") % (tag,src))
return doc_src.toxml(encoding="utf-8").replace('\t', '')
result = {'type': view_type, 'model': self._name}
while ok:
if view_id:
where = (model and (" and model='%s'" % (self._name,))) or ''
- cr.execute('SELECT arch,name,field_parent,id,type,inherit_id FROM ir_ui_view WHERE id=%d'+where, (view_id,))
+ cr.execute('SELECT arch,name,field_parent,id,type,inherit_id FROM ir_ui_view WHERE id=%s'+where, (view_id,))
else:
cr.execute('''SELECT
arch,name,field_parent,id,type,inherit_id
def _inherit_apply_rec(result, inherit_id):
# get all views which inherit from (ie modify) this view
- cr.execute('select arch,id from ir_ui_view where inherit_id=%d and model=%s order by priority', (inherit_id, self._name))
+ cr.execute('select arch,id from ir_ui_view where inherit_id=%s and model=%s order by priority', (inherit_id, self._name))
sql_inherit = cr.fetchall()
for (inherit, id) in sql_inherit:
result = _inherit_apply(result, inherit)
# otherwise, build some kind of default view
if view_type == 'form':
res = self.fields_get(cr, user, context=context)
- xml = '''<?xml version="1.0" encoding="utf-8"?>''' \
- '''<form string="%s">''' % (self._description,)
+ xml = '<?xml version="1.0" encoding="utf-8"?> ' \
+ '<form string="%s">' % (self._description,)
for x in res:
if res[x]['type'] not in ('one2many', 'many2many'):
xml += '<field name="%s"/>' % (x,)
_rec_name = self._rec_name
if _rec_name not in self._columns:
_rec_name = self._columns.keys()[0]
- xml = '''<?xml version="1.0" encoding="utf-8"?>''' \
- '''<tree string="%s"><field name="%s"/></tree>''' \
- % (self._description, self._rec_name)
+ xml = '<?xml version="1.0" encoding="utf-8"?>' \
+ '<tree string="%s"><field name="%s"/></tree>' \
+ % (self._description, self._rec_name)
elif view_type == 'calendar':
xml = self.__get_default_calendar_view()
else:
- xml = ''
+ xml = '<?xml version="1.0"?>' # what happens here, graph case?
result['arch'] = xml
result['name'] = 'default'
result['field_parent'] = False
result['view_id'] = 0
- doc = dom.minidom.parseString(result['arch'].encode('utf-8'))
- xarch, xfields = self.__view_look_dom_arch(cr, user, doc, context=context)
+ try:
+ doc = dom.minidom.parseString(encode(result['arch']))
+ except Exception, ex:
+ logger = netsvc.Logger()
+ logger.notifyChannel('init', netsvc.LOG_DEBUG, 'Wrong arch in %s (%s):\n %s' % (result['name'], view_type, result['arch'] ))
+ raise except_orm('Error',
+ ('Invalid xml in view %s(%d) of %s: %s' % (result['name'], result['view_id'], self._name, str(ex))))
+ xarch, xfields = self.__view_look_dom_arch(cr, user, doc, view_id, context=context)
result['arch'] = xarch
result['fields'] = xfields
if toolbar:
raise _('The copy method is not implemented on this object !')
def read_string(self, cr, uid, id, langs, fields=None, context=None):
- if not context:
- context = {}
res = {}
res2 = {}
self.pool.get('ir.model.access').check(cr, uid, 'ir.translation', 'read')
res2 = self.pool.get(table).read_string(cr, uid, id, langs, cols, context)
for lang in res2:
if lang in res:
- res[lang] = {'code': lang}
+ res[lang]['code'] = lang
for f in res2[lang]:
res[lang][f] = res2[lang][f]
return res
def write_string(self, cr, uid, id, langs, vals, context=None):
- if not context:
- context = {}
self.pool.get('ir.model.access').check(cr, uid, 'ir.translation', 'write')
for lang in langs:
for field in vals:
self.pool.get(table).write_string(cr, uid, id, langs, vals, context)
return True
+ def _check_removed_columns(self, cr, log=False):
+ raise NotImplementedError()
class orm_memory(orm_template):
_protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count']
'id': id
})
return result
+
+ def _check_removed_columns(self, cr, log=False):
+ # nothing to check in memory...
+ pass
class orm(orm_template):
-
_sql_constraints = []
-
- _log_access = True
_table = None
_protected = ['read','write','create','default_get','perm_read','unlink','fields_get','fields_view_get','search','name_get','distinct_field_get','name_search','copy','import_data','search_count']
+
def _parent_store_compute(self, cr):
logger = netsvc.Logger()
- logger.notifyChannel('init', netsvc.LOG_INFO, 'Computing parent left and right for table %s...' % (self._table, ))
+ logger.notifyChannel('orm', netsvc.LOG_INFO, 'Computing parent left and right for table %s...' % (self._table, ))
def browse_rec(root, pos=0):
# TODO: set order
where = self._parent_name+'='+str(root)
if not root:
where = self._parent_name+' IS NULL'
+ if self._parent_order:
+ where += ' order by '+self._parent_order
cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
pos2 = pos + 1
childs = cr.fetchall()
for id in childs:
pos2 = browse_rec(id[0], pos2)
- cr.execute('update '+self._table+' set parent_left=%d, parent_right=%d where id=%d', (pos,pos2,root))
+ cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos,pos2,root))
return pos2+1
- browse_rec(None)
+ query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
+ if self._parent_order:
+ query += ' order by '+self._parent_order
+ pos = 0
+ cr.execute(query)
+ for (root,) in cr.fetchall():
+ pos = browse_rec(root, pos)
return True
+ def _update_store(self, cr, f, k):
+ logger = netsvc.Logger()
+ logger.notifyChannel('orm', netsvc.LOG_INFO, "storing computed values of fields.function '%s'" % (k,))
+ ss = self._columns[k]._symbol_set
+ update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
+ cr.execute('select id from '+self._table)
+ ids_lst = map(lambda x: x[0], cr.fetchall())
+ while ids_lst:
+ iids = ids_lst[:40]
+ ids_lst = ids_lst[40:]
+ res = f.get(cr, self, iids, k, 1, {})
+ for key,val in res.items():
+ if f._multi:
+ val = val[k]
+ # if val is a many2one, just write the ID
+ if type(val)==tuple:
+ val = val[0]
+ if (val<>False) or (type(val)<>bool):
+ cr.execute(update_query, (ss[1](val), key))
+
+ def _check_removed_columns(self, cr, log=False):
+ logger = netsvc.Logger()
+ # iterate on the database columns to drop the NOT NULL constraints
+ # of fields which were required but have been removed (or will be added by another module)
+ columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
+ columns += ('id', 'write_uid', 'write_date', 'create_uid', 'create_date') # openerp access columns
+ cr.execute("SELECT a.attname, a.attnotnull"
+ " FROM pg_class c, pg_attribute a"
+ " WHERE c.relname=%%s"
+ " AND c.oid=a.attrelid"
+ " AND a.attisdropped=%%s"
+ " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
+ " AND a.attname NOT IN (%s)" % ",".join(['%s']*len(columns)),
+ [self._table, False] + columns)
+ for column in cr.dictfetchall():
+ if log:
+ logger.notifyChannel("orm", netsvc.LOG_DEBUG, "column %s is in the table %s but not in the corresponding object %s" % (column['attname'], self._table, self._name))
+ if column['attnotnull']:
+ cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
+
def _auto_init(self, cr, context={}):
store_compute = False
logger = netsvc.Logger()
create = False
+ todo_end = []
self._field_create(cr, context=context)
if not hasattr(self, "_auto") or self._auto:
cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname='%s'" % self._table)
""", (self._table, 'parent_left'))
if not cr.rowcount:
if 'parent_left' not in self._columns:
- logger.notifyChannel('init', netsvc.LOG_ERROR, 'create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)' % (self._table, ))
+ logger.notifyChannel('orm', netsvc.LOG_ERROR, 'create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)' % (self._table, ))
if 'parent_right' not in self._columns:
- logger.notifyChannel('init', netsvc.LOG_ERROR, 'create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)' % (self._table, ))
+ logger.notifyChannel('orm', netsvc.LOG_ERROR, 'create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)' % (self._table, ))
if self._columns[self._parent_name].ondelete<>'cascade':
- logger.notifyChannel('init', netsvc.LOG_ERROR, "the columns %s on object must be set as ondelete='cascasde'" % (self._name, self._parent_name))
- cr.execute("ALTER TABLE \"%s\" ADD COLUMN \"%s\" INTEGER" % (self._table, 'parent_left'))
- cr.execute("ALTER TABLE \"%s\" ADD COLUMN \"%s\" INTEGER" % (self._table, 'parent_right'))
+ logger.notifyChannel('orm', netsvc.LOG_ERROR, "the column %s on object %s must be set as ondelete='cascade'" % (self._parent_name, self._name))
+ cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
+ cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
cr.commit()
store_compute = True
'write_date': 'TIMESTAMP'
}
for k in logs:
- cr.execute(
- """
+ cr.execute("""
SELECT c.relname
- FROM pg_class c, pg_attribute a
- WHERE c.relname='%s' AND a.attname='%s' AND c.oid=a.attrelid
- """ % (self._table, k))
+ FROM pg_class c, pg_attribute a
+ WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
+ """, (self._table, k))
if not cr.rowcount:
- cr.execute("ALTER TABLE \"%s\" ADD COLUMN \"%s\" %s" %
- (self._table, k, logs[k]))
+ cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, logs[k]))
cr.commit()
-
- # iterate on the database columns to drop the NOT NULL constraints
- # of fields which were required but have been removed
- cr.execute(
- "SELECT a.attname, a.attnotnull "\
- "FROM pg_class c, pg_attribute a "\
- "WHERE c.oid=a.attrelid AND c.relname='%s'" % self._table)
- db_columns = cr.dictfetchall()
- for column in db_columns:
- if column['attname'] not in ('id', 'oid', 'tableoid', 'ctid', 'xmin', 'xmax', 'cmin', 'cmax'):
- if column['attnotnull'] and column['attname'] not in self._columns:
- cr.execute("ALTER TABLE \"%s\" ALTER COLUMN \"%s\" DROP NOT NULL" % (self._table, column['attname']))
+
+ self._check_removed_columns(cr, log=False)
# iterate on the "object columns"
+ todo_update_store = []
for k in self._columns:
if k in ('id', 'write_uid', 'write_date', 'create_uid', 'create_date'):
continue
if isinstance(f, fields.one2many):
cr.execute("SELECT relname FROM pg_class WHERE relkind='r' AND relname=%s", (f._obj,))
if cr.fetchone():
- cr.execute("SELECT count(*) as c FROM pg_class c,pg_attribute a WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid", (f._obj, f._fields_id))
+ cr.execute("SELECT count(1) as c FROM pg_class c,pg_attribute a WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid", (f._obj, f._fields_id))
res = cr.fetchone()[0]
if not res:
- cr.execute("ALTER TABLE \"%s\" ADD FOREIGN KEY (%s) REFERENCES \"%s\" ON DELETE SET NULL" % (self._obj, f._fields_id, f._table))
+ cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY (%s) REFERENCES "%s" ON DELETE SET NULL' % (self._obj, f._fields_id, f._table))
elif isinstance(f, fields.many2many):
cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname=%s", (f._rel,))
if not cr.dictfetchall():
ref = self.pool.get(f._obj)._table
except AttributeError:
ref = f._obj.replace('.', '_')
- cr.execute("CREATE TABLE \"%s\" (\"%s\" INTEGER NOT NULL REFERENCES \"%s\" ON DELETE CASCADE, \"%s\" INTEGER NOT NULL REFERENCES \"%s\" ON DELETE CASCADE) WITH OIDS"%(f._rel, f._id1, self._table, f._id2, ref))
- cr.execute("CREATE INDEX \"%s_%s_index\" ON \"%s\" (\"%s\")" % (f._rel, f._id1, f._rel, f._id1))
- cr.execute("CREATE INDEX \"%s_%s_index\" ON \"%s\" (\"%s\")" % (f._rel, f._id2, f._rel, f._id2))
+ cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL REFERENCES "%s" ON DELETE CASCADE, "%s" INTEGER NOT NULL REFERENCES "%s" ON DELETE CASCADE) WITH OIDS' % (f._rel, f._id1, self._table, f._id2, ref))
+ cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (f._rel, f._id1, f._rel, f._id1))
+ cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (f._rel, f._id2, f._rel, f._id2))
cr.commit()
else:
- cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size FROM pg_class c,pg_attribute a,pg_type t WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid AND a.atttypid=t.oid", (self._table, k))
+ cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size " \
+ "FROM pg_class c,pg_attribute a,pg_type t " \
+ "WHERE c.relname=%s " \
+ "AND a.attname=%s " \
+ "AND c.oid=a.attrelid " \
+ "AND a.atttypid=t.oid", (self._table, k))
res = cr.dictfetchall()
if not res:
if not isinstance(f, fields.function) or f.store:
# add the missing field
- cr.execute("ALTER TABLE \"%s\" ADD COLUMN \"%s\" %s" % (self._table, k, get_pg_type(f)[1]))
+ cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
# initialize it
if not create and k in self._defaults:
default = self._defaults[k](self, cr, 1, {})
- if not default:
- cr.execute("UPDATE \"%s\" SET \"%s\"=NULL" % (self._table, k))
- else:
- cr.execute("UPDATE \"%s\" SET \"%s\"='%s'" % (self._table, k, default))
+ ss = self._columns[k]._symbol_set
+ query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
+ cr.execute(query, (ss[1](default),))
+ cr.commit()
+ logger.notifyChannel('orm', netsvc.LOG_DEBUG, 'setting default value of new column %s of table %s'% (k, self._table))
+ elif not create:
+ logger.notifyChannel('orm', netsvc.LOG_DEBUG, 'creating new column %s of table %s'% (k, self._table))
+
if isinstance(f, fields.function):
- cr.execute('select id from '+self._table)
- ids_lst = map(lambda x: x[0], cr.fetchall())
- while ids_lst:
- iids = ids_lst[:40]
- ids_lst = ids_lst[40:]
- res = f.get(cr, self, iids, k, 1, {})
- for key,val in res.items():
- if f._multi:
- val = val[k]
- if (val<>False) or (type(val)<>bool):
- cr.execute("UPDATE \"%s\" SET \"%s\"='%s' where id=%d"% (self._table, k, val, key))
- #else:
- # cr.execute("UPDATE \"%s\" SET \"%s\"=NULL where id=%d"% (self._table, k, key))
+ order = 10
+ if f.store is not True:
+ order = f.store[f.store.keys()[0]][2]
+ todo_update_store.append((order, f,k))
# and add constraints if needed
if isinstance(f, fields.many2one):
ref = f._obj.replace('.', '_')
# ir_actions is inherited so foreign key doesn't work on it
if ref != 'ir_actions':
- cr.execute("ALTER TABLE \"%s\" ADD FOREIGN KEY (\"%s\") REFERENCES \"%s\" ON DELETE %s" % (self._table, k, ref, f.ondelete))
+ cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (self._table, k, ref, f.ondelete))
if f.select:
- cr.execute("CREATE INDEX \"%s_%s_index\" ON \"%s\" (\"%s\")" % (self._table, k, self._table, k))
+ cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
if f.required:
- cr.commit()
try:
- cr.execute("ALTER TABLE \"%s\" ALTER COLUMN \"%s\" SET NOT NULL" % (self._table, k))
- except:
- logger.notifyChannel('init', netsvc.LOG_WARNING, 'WARNING: unable to set column %s of table %s not null !\nTry to re-run: openerp-server.py --update=module\nIf it doesn\'t work, update records and execute manually:\nALTER TABLE %s ALTER COLUMN %s SET NOT NULL' % (k, self._table, self._table, k))
+ cr.commit()
+ cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
+ except Exception, e:
+ logger.notifyChannel('orm', netsvc.LOG_WARNING, 'WARNING: unable to set column %s of table %s not null !\nTry to re-run: openerp-server.py --update=module\nIf it doesn\'t work, update records and execute manually:\nALTER TABLE %s ALTER COLUMN %s SET NOT NULL' % (k, self._table, self._table, k))
cr.commit()
elif len(res)==1:
f_pg_def = res[0]
f_pg_size = f_pg_def['size']
f_pg_notnull = f_pg_def['attnotnull']
if isinstance(f, fields.function) and not f.store:
- logger.notifyChannel('init', netsvc.LOG_WARNING, 'column %s (%s) in table %s was converted to a function !\nYou should remove this column from your database.' % (k, f.string, self._table))
+ logger.notifyChannel('orm', netsvc.LOG_INFO, 'column %s (%s) in table %s removed: converted to a function !\n' % (k, f.string, self._table))
+ cr.execute('ALTER TABLE %s DROP COLUMN %s'% (self._table, k))
+ cr.commit()
f_obj_type = None
else:
f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
if f_obj_type:
- if f_pg_type != f_obj_type:
- logger.notifyChannel('init', netsvc.LOG_WARNING, "column '%s' in table '%s' has changed type (DB = %s, def = %s) !" % (k, self._table, f_pg_type, f._type))
+ ok = False
+ casts = [
+ ('text', 'char', 'VARCHAR(%d)' % (f.size or 0,), '::VARCHAR(%d)'%(f.size or 0,)),
+ ('varchar', 'text', 'TEXT', ''),
+ ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
+ ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
+ ]
if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size != f.size:
- # columns with the name 'type' cannot be changed for an unknown reason?!
- if k != 'type':
- if f_pg_size > f.size:
- logger.notifyChannel('init', netsvc.LOG_WARNING, "column '%s' in table '%s' has changed size (DB = %d, def = %d), DB size will be kept !" % (k, self._table, f_pg_size, f.size))
- # If actual DB size is < than new
- # We update varchar size, otherwise, we keep DB size
- # to avoid truncated string...
- if f_pg_size < f.size:
- cr.execute("ALTER TABLE \"%s\" RENAME COLUMN \"%s\" TO temp_change_size" % (self._table, k))
- cr.execute("ALTER TABLE \"%s\" ADD COLUMN \"%s\" VARCHAR(%d)" % (self._table, k, f.size))
- cr.execute("UPDATE \"%s\" SET \"%s\"=temp_change_size::VARCHAR(%d)" % (self._table, k, f.size))
- cr.execute("ALTER TABLE \"%s\" DROP COLUMN temp_change_size" % (self._table,))
- cr.commit()
- if f_pg_type == 'date' and f._type == 'datetime':
- cr.execute("ALTER TABLE \"%s\" RENAME COLUMN \"%s\" TO temp_change_type" % (self._table, k))
- cr.execute("ALTER TABLE \"%s\" ADD COLUMN \"%s\" TIMESTAMP " % (self._table, k))
- cr.execute("UPDATE \"%s\" SET \"%s\"=temp_change_type::TIMESTAMP" % (self._table, k))
- cr.execute("ALTER TABLE \"%s\" DROP COLUMN temp_change_type" % (self._table,))
- cr.commit()
+ logger.notifyChannel('orm', netsvc.LOG_INFO, "column '%s' in table '%s' changed size" % (k, self._table))
+ cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
+ cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" VARCHAR(%d)' % (self._table, k, f.size))
+ cr.execute('UPDATE "%s" SET "%s"=temp_change_size::VARCHAR(%d)' % (self._table, k, f.size))
+ cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size' % (self._table,))
+ cr.commit()
+ for c in casts:
+ if (f_pg_type==c[0]) and (f._type==c[1]):
+ logger.notifyChannel('orm', netsvc.LOG_INFO, "column '%s' in table '%s' changed type to %s." % (k, self._table, c[1]))
+ ok = True
+ cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
+ cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
+ cr.execute(('UPDATE "%s" SET "%s"=temp_change_size'+c[3]) % (self._table, k))
+ cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
+ cr.commit()
+
+ if f_pg_type != f_obj_type:
+ if not ok:
+ logger.notifyChannel('orm', netsvc.LOG_WARNING, "column '%s' in table '%s' has changed type (DB = %s, def = %s) but unable to migrate this change !" % (k, self._table, f_pg_type, f._type))
+
# if the field is required and hasn't got a NOT NULL constraint
if f.required and f_pg_notnull == 0:
# set the field to the default value if any
if k in self._defaults:
default = self._defaults[k](self, cr, 1, {})
- if not (default is False):
- cr.execute("UPDATE \"%s\" SET \"%s\"='%s' WHERE %s is NULL" % (self._table, k, default, k))
- cr.commit()
+ if (default is not None):
+ ss = self._columns[k]._symbol_set
+ query = 'UPDATE "%s" SET "%s"=%s WHERE %s is NULL' % (self._table, k, ss[0], k)
+ cr.execute(query, (ss[1](default),))
# add the NOT NULL constraint
+ cr.commit()
try:
- cr.execute("ALTER TABLE \"%s\" ALTER COLUMN \"%s\" SET NOT NULL" % (self._table, k))
+ cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
cr.commit()
- except:
- logger.notifyChannel('init', netsvc.LOG_WARNING, 'unable to set a NOT NULL constraint on column %s of the %s table !\nIf you want to have it, you should update the records and execute manually:\nALTER TABLE %s ALTER COLUMN %s SET NOT NULL' % (k, self._table, self._table, k))
+ except Exception, e:
+ logger.notifyChannel('orm', netsvc.LOG_WARNING, 'unable to set a NOT NULL constraint on column %s of the %s table !\nIf you want to have it, you should update the records and execute manually:\nALTER TABLE %s ALTER COLUMN %s SET NOT NULL' % (k, self._table, self._table, k))
cr.commit()
elif not f.required and f_pg_notnull == 1:
- cr.execute("ALTER TABLE \"%s\" ALTER COLUMN \"%s\" DROP NOT NULL" % (self._table, k))
+ cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.commit()
- cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = '%s_%s_index' and tablename = '%s'" % (self._table, k, self._table))
+ indexname = '%s_%s_index' % (self._table, k)
+ cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
res = cr.dictfetchall()
if not res and f.select:
- cr.execute("CREATE INDEX \"%s_%s_index\" ON \"%s\" (\"%s\")" % (self._table, k, self._table, k))
+ cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
cr.commit()
if res and not f.select:
- cr.execute("DROP INDEX \"%s_%s_index\"" % (self._table, k))
+ cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
cr.commit()
if isinstance(f, fields.many2one):
ref = self.pool.get(f._obj)._table
if ref != 'ir_actions':
- cr.execute('SELECT confdeltype, conname FROM pg_constraint as con, pg_class as cl1, pg_class as cl2, ' \
- 'pg_attribute as att1, pg_attribute as att2 ' \
- 'WHERE con.conrelid = cl1.oid ' \
- 'AND cl1.relname = %s ' \
- 'AND con.confrelid = cl2.oid ' \
- 'AND cl2.relname = %s ' \
- 'AND array_lower(con.conkey, 1) = 1 ' \
- 'AND con.conkey[1] = att1.attnum ' \
- 'AND att1.attrelid = cl1.oid ' \
- 'AND att1.attname = %s ' \
- 'AND array_lower(con.confkey, 1) = 1 ' \
- 'AND con.confkey[1] = att2.attnum ' \
- 'AND att2.attrelid = cl2.oid ' \
- 'AND att2.attname = %s ' \
- 'AND con.contype = \'f\'', (self._table, ref, k, 'id'))
+ cr.execute('SELECT confdeltype, conname FROM pg_constraint as con, pg_class as cl1, pg_class as cl2, '
+ 'pg_attribute as att1, pg_attribute as att2 '
+ 'WHERE con.conrelid = cl1.oid '
+ 'AND cl1.relname = %s '
+ 'AND con.confrelid = cl2.oid '
+ 'AND cl2.relname = %s '
+ 'AND array_lower(con.conkey, 1) = 1 '
+ 'AND con.conkey[1] = att1.attnum '
+ 'AND att1.attrelid = cl1.oid '
+ 'AND att1.attname = %s '
+ 'AND array_lower(con.confkey, 1) = 1 '
+ 'AND con.confkey[1] = att2.attnum '
+ 'AND att2.attrelid = cl2.oid '
+ 'AND att2.attname = %s '
+ "AND con.contype = 'f'", (self._table, ref, k, 'id'))
res = cr.dictfetchall()
if res:
confdeltype = {
cr.execute('ALTER TABLE "' + self._table + '" ADD FOREIGN KEY ("' + k + '") REFERENCES "' + ref + '" ON DELETE ' + f.ondelete)
cr.commit()
else:
- print "ERROR"
+ logger.notifyChannel('orm', netsvc.LOG_ERROR, "Programming error !")
+ for order,f,k in todo_update_store:
+ todo_end.append((order, self._update_store, (f, k)))
+
else:
- cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname='%s'" % self._table)
+ cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname=%s", (self._table,))
create = not bool(cr.fetchone())
for (key, con, _) in self._sql_constraints:
- cr.execute("SELECT conname FROM pg_constraint where conname='%s_%s'" % (self._table, key))
+ conname = '%s_%s' % (self._table, key)
+ cr.execute("SELECT conname FROM pg_constraint where conname=%s", (conname,))
if not cr.dictfetchall():
try:
- cr.execute('alter table \"%s\" add constraint \"%s_%s\" %s' % (self._table, self._table, key, con,))
+ cr.execute('alter table "%s" add constraint "%s_%s" %s' % (self._table, self._table, key, con,))
cr.commit()
except:
- logger.notifyChannel('init', netsvc.LOG_WARNING, 'unable to add \'%s\' constraint on table %s !\n If you want to have it, you should update the records and execute manually:\nALTER table %s ADD CONSTRAINT %s_%s %s' % (con, self._table, self._table, self._table, key, con,))
+ logger.notifyChannel('orm', netsvc.LOG_WARNING, 'unable to add \'%s\' constraint on table %s !\n If you want to have it, you should update the records and execute manually:\nALTER table %s ADD CONSTRAINT %s_%s %s' % (con, self._table, self._table, self._table, key, con,))
if create:
if hasattr(self, "_sql"):
cr.commit()
if store_compute:
self._parent_store_compute(cr)
+ return todo_end
def __init__(self, cr):
super(orm, self).__init__(cr)
+
+ if not hasattr(self, '_log_access'):
+ # if _log_access is not specified, it defaults to the same value as _auto
+ self._log_access = not hasattr(self, "_auto") or self._auto
+
self._columns = self._columns.copy()
- f = filter(lambda a: isinstance(self._columns[a], fields.function) and self._columns[a].store, self._columns)
- if f:
- list_store = []
- tuple_store = ()
- tuple_fn = ()
- for store_field in f:
- if not self._columns[store_field].store == True:
- dict_store = self._columns[store_field].store
- key = dict_store.keys()
- list_data = []
- for i in key:
- tuple_store = self._name, store_field, self._columns[store_field]._fnct.__name__, tuple(dict_store[i][0]), dict_store[i][1], i
- list_data.append(tuple_store)
- #tuple_store=self._name,store_field,self._columns[store_field]._fnct.__name__,tuple(dict_store[key[0]][0]),dict_store[key[0]][1]
- for l in list_data:
- list_store = []
- if l[5] in self.pool._store_function.keys():
- self.pool._store_function[l[5]].append(l)
- temp_list = list(set(self.pool._store_function[l[5]]))
- self.pool._store_function[l[5]] = temp_list
- else:
- list_store.append(l)
- self.pool._store_function[l[5]] = list_store
+ for store_field in self._columns:
+ f = self._columns[store_field]
+ if not isinstance(f, fields.function):
+ continue
+ if not f.store:
+ continue
+ if self._columns[store_field].store is True:
+ sm = {self._name:(lambda self,cr, uid, ids, c={}: ids, None, 10)}
+ else:
+ sm = self._columns[store_field].store
+ for object, aa in sm.items():
+ if len(aa)==3:
+ (fnct,fields2,order)=aa
+ else:
+ raise except_orm('Error',
+ ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority)}.' % (store_field, self._name)))
+ self.pool._store_function.setdefault(object, [])
+ ok = True
+ for x,y,z,e,f in self.pool._store_function[object]:
+ if (x==self._name) and (y==store_field) and (e==fields2):
+ ok = False
+ if ok:
+ self.pool._store_function[object].append( (self._name, store_field, fnct, fields2, order))
+ self.pool._store_function[object].sort(lambda x,y: cmp(x[4],y[4]))
for (key, _, msg) in self._sql_constraints:
self.pool._sql_error[self._table+'_'+key] = msg
'translate': (field['translate']),
#'select': int(field['select_level'])
}
+
if field['ttype'] == 'selection':
self._columns[field['name']] = getattr(fields, field['ttype'])(eval(field['selection']), **attrs)
+ elif field['ttype'] == 'reference':
+ self._columns[field['name']] = getattr(fields, field['ttype'])(selection=eval(field['selection']), **attrs)
elif field['ttype'] == 'many2one':
self._columns[field['name']] = getattr(fields, field['ttype'])(field['relation'], **attrs)
elif field['ttype'] == 'one2many':
value[key[8:]] = context[key]
return value
-
#
# Update objects that uses this one to update their _inherits fields
#
if v == None:
r[key] = False
if isinstance(ids, (int, long)):
- return result[0]
+ return result and result[0] or False
return result
def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
d1, d2 = self.pool.get('ir.rule').domain_get(cr, user, self._name)
# all inherited fields + all non inherited fields for which the attribute whose name is in load is True
- fields_pre = filter(lambda x: x in self._columns and getattr(self._columns[x], '_classic_write'), fields_to_read) + self._inherits.values()
+ fields_pre = [f for f in fields_to_read if
+ f == self.CONCURRENCY_CHECK_FIELD
+ or (f in self._columns and getattr(self._columns[f], '_classic_write'))
+ ] + self._inherits.values()
res = []
if len(fields_pre):
def convert_field(f):
if f in ('create_date', 'write_date'):
return "date_trunc('second', %s) as %s" % (f, f)
+ if f == self.CONCURRENCY_CHECK_FIELD:
+ if self._log_access:
+ return "COALESCE(write_date, create_date, now())::timestamp AS %s" % (f,)
+ return "now()::timestamp AS %s" % (f,)
if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
return "length(%s) as %s" % (f,f)
return '"%s"' % (f,)
- #fields_pre2 = map(lambda x: (x in ('create_date', 'write_date')) and ('date_trunc(\'second\', '+x+') as '+x) or '"'+x+'"', fields_pre)
fields_pre2 = map(convert_field, fields_pre)
for i in range(0, len(ids), cr.IN_MAX):
sub_ids = ids[i:i+cr.IN_MAX]
res = map(lambda x: {'id': x}, ids)
for f in fields_pre:
+ if f == self.CONCURRENCY_CHECK_FIELD:
+ continue
if self._columns[f].translate:
ids = map(lambda x: x['id'], res)
res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context.get('lang', False) or 'en_US', ids)
# to get the _symbol_get in each occurence
for r in res:
for f in fields_post:
- r[f] = self.columns[f]._symbol_get(r[f])
+ r[f] = self._columns[f]._symbol_get(r[f])
ids = map(lambda x: x['id'], res)
# all non inherited fields for which the attribute whose name is in load is False
return res[ids]
return res
- def unlink(self, cr, uid, ids, context=None):
+ def _check_concurrency(self, cr, ids, context):
if not context:
- context = {}
+ return
+ if context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access:
+ def key(oid):
+ return "%s,%s" % (self._name, oid)
+ santa = "(id = %s AND %s < COALESCE(write_date, create_date, now())::timestamp)"
+ for i in range(0, len(ids), cr.IN_MAX):
+ sub_ids = tools.flatten(((oid, context[self.CONCURRENCY_CHECK_FIELD][key(oid)])
+ for oid in ids[i:i+cr.IN_MAX]
+ if key(oid) in context[self.CONCURRENCY_CHECK_FIELD]))
+ if sub_ids:
+ cr.execute("SELECT count(1) FROM %s WHERE %s" % (self._table, " OR ".join([santa]*(len(sub_ids)/2))), sub_ids)
+ res = cr.fetchone()
+ if res and res[0]:
+ raise except_orm('ConcurrencyException', _('Records were modified in the meanwhile'))
+
+ def unlink(self, cr, uid, ids, context=None):
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
- fn_list = []
- if self._name in self.pool._store_function.keys():
- list_store = self.pool._store_function[self._name]
- fn_data = ()
- id_change = []
- for tuple_fn in list_store:
- for id in ids:
- id_change.append(self._store_get_ids(cr, uid, id, tuple_fn, context)[0])
- fn_data = id_change, tuple_fn
- fn_list.append(fn_data)
-
- delta = context.get('read_delta', False)
- if delta and self._log_access:
- for i in range(0, len(ids), cr.IN_MAX):
- sub_ids = ids[i:i+cr.IN_MAX]
- cr.execute("select (now() - min(write_date)) <= '%s'::interval " \
- "from \"%s\" where id in (%s)" %
- (delta, self._table, ",".join(map(str, sub_ids))))
- res = cr.fetchone()
- if res and res[0]:
- raise except_orm(_('ConcurrencyException'),
- _('This record was modified in the meanwhile'))
+ result_store = self._store_get_values(cr, uid, ids, None, context)
+
+ self._check_concurrency(cr, ids, context)
self.pool.get('ir.model.access').check(cr, uid, self._name, 'unlink')
for i in range(0, len(ids), cr.IN_MAX):
sub_ids = ids[i:i+cr.IN_MAX]
- str_d = string.join(('%d',)*len(sub_ids), ',')
+ str_d = string.join(('%s',)*len(sub_ids), ',')
if d1:
cr.execute('SELECT id FROM "'+self._table+'" ' \
'WHERE id IN ('+str_d+')'+d1, sub_ids+d2)
else:
cr.execute('delete from "'+self._table+'" ' \
'where id in ('+str_d+')', sub_ids)
- if fn_list:
- for ids, tuple_fn in fn_list:
- self._store_set_values(cr, uid, ids, tuple_fn, id_change, context)
+ for order, object, ids, fields in result_store:
+ if object<>self._name:
+ cr.execute('select id from '+self._table+' where id in ('+','.join(map(str, ids))+')')
+ ids = map(lambda x: x[0], cr.fetchall())
+ if ids:
+ self.pool.get(object)._store_set_values(cr, uid, ids, fields, context)
return True
#
return True
if isinstance(ids, (int, long)):
ids = [ids]
- delta = context.get('read_delta', False)
- if delta and self._log_access:
- for i in range(0, len(ids), cr.IN_MAX):
- sub_ids = ids[i:i+cr.IN_MAX]
- cr.execute("select (now() - min(write_date)) <= '%s'::interval " \
- "from %s where id in (%s)" %
- (delta, self._table, ",".join(map(str, sub_ids))))
- res = cr.fetchone()
- if res and res[0]:
- for field in vals:
- if field in self._columns and self._columns[field]._classic_write:
- raise except_orm(_('ConcurrencyException'),
- _('This record was modified in the meanwhile'))
+
+ self._check_concurrency(cr, ids, context)
self.pool.get('ir.model.access').check(cr, user, self._name, 'write')
- #for v in self._inherits.values():
- # assert v not in vals, (v, vals)
upd0 = []
upd1 = []
upd_todo = []
totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
for field in vals:
if field in self._columns:
- if self._columns[field]._classic_write:
+ if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
if (not totranslate) or not self._columns[field].translate:
upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
upd1.append(self._columns[field]._symbol_set[1](vals[field]))
% (vals[field], field))
if self._log_access:
- upd0.append('write_uid=%d')
+ upd0.append('write_uid=%s')
upd0.append('write_date=now()')
upd1.append(user)
if self.pool._init:
self.pool._init_parent[self._name]=True
else:
- cr.execute('select parent_left,parent_right from '+self._table+' where id=%d', (vals[self._parent_name],))
- res = cr.fetchone()
- if res:
- pleft,pright = res
- else:
- cr.execute('select max(parent_right),max(parent_right)+1 from '+self._table)
- pleft,pright = cr.fetchone()
- cr.execute('select parent_left,parent_right,id from '+self._table+' where id in ('+','.join(map(lambda x:'%d',ids))+')', ids)
- dest = pleft + 1
- for cleft,cright,cid in cr.fetchall():
- if cleft > pleft:
- treeshift = pleft - cleft + 1
- leftbound = pleft+1
- rightbound = cleft-1
- cwidth = cright-cleft+1
- leftrange = cright
- rightrange = pleft
+ for id in ids:
+ if vals[self._parent_name]:
+ cr.execute('select parent_left,parent_right,id from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (vals[self._parent_name],))
+ pleft_old = pright_old = None
+ result_p = cr.fetchall()
+ for (pleft,pright,pid) in result_p:
+ if pid == id:
+ break
+ pleft_old = pleft
+ pright_old = pright
+ if not pleft_old:
+ cr.execute('select parent_left,parent_right from '+self._table+' where id=%s', (vals[self._parent_name],))
+ pleft_old,pright_old = cr.fetchone()
+ res = (pleft_old, pright_old)
else:
- treeshift = pleft - cright
- leftbound = cright + 1
- rightbound = pleft
- cwidth = cleft-cright-1
- leftrange = pleft+1
- rightrange = cleft
- cr.execute('UPDATE '+self._table+'''
- SET
- parent_left = CASE
- WHEN parent_left BETWEEN %d AND %d THEN parent_left + %d
- WHEN parent_left BETWEEN %d AND %d THEN parent_left + %d
- ELSE parent_left
- END,
- parent_right = CASE
- WHEN parent_right BETWEEN %d AND %d THEN parent_right + %d
- WHEN parent_right BETWEEN %d AND %d THEN parent_right + %d
- ELSE parent_right
- END
- WHERE
- parent_left<%d OR parent_right>%d;
- ''', (leftbound,rightbound,cwidth,cleft,cright,treeshift,leftbound,rightbound,
- cwidth,cleft,cright,treeshift,leftrange,rightrange))
-
- if 'read_delta' in context:
- del context['read_delta']
+ cr.execute('SELECT parent_left,parent_right FROM '+self._table+' WHERE id IS NULL')
+ res = cr.fetchone()
+ if res:
+ pleft,pright = res
+ else:
+ cr.execute('select max(parent_right),max(parent_right)+1 from '+self._table)
+ pleft,pright = cr.fetchone()
+ cr.execute('select parent_left,parent_right,id from '+self._table+' where id in ('+','.join(map(lambda x:'%s',ids))+')', ids)
+ dest = pleft + 1
+ for cleft,cright,cid in cr.fetchall():
+ if cleft > pleft:
+ treeshift = pleft - cleft + 1
+ leftbound = pleft+1
+ rightbound = cleft-1
+ cwidth = cright-cleft+1
+ leftrange = cright
+ rightrange = pleft
+ else:
+ treeshift = pleft - cright
+ leftbound = cright + 1
+ rightbound = pleft
+ cwidth = cleft-cright-1
+ leftrange = pleft+1
+ rightrange = cleft
+ cr.execute('UPDATE '+self._table+'''
+ SET
+ parent_left = CASE
+ WHEN parent_left BETWEEN %s AND %s THEN parent_left + %s
+ WHEN parent_left BETWEEN %s AND %s THEN parent_left + %s
+ ELSE parent_left
+ END,
+ parent_right = CASE
+ WHEN parent_right BETWEEN %s AND %s THEN parent_right + %s
+ WHEN parent_right BETWEEN %s AND %s THEN parent_right + %s
+ ELSE parent_right
+ END
+ WHERE
+ parent_left<%s OR parent_right>%s;
+ ''', (leftbound,rightbound,cwidth,cleft,cright,treeshift,leftbound,rightbound,
+ cwidth,cleft,cright,treeshift,leftrange,rightrange))
+
+ result = self._store_get_values(cr, user, ids, vals.keys(), context)
+ for order, object, ids, fields in result:
+ self.pool.get(object)._store_set_values(cr, user, ids, fields, context)
wf_service = netsvc.LocalService("workflow")
for id in ids:
wf_service.trg_write(user, self._name, id, cr)
- self._update_function_stored(cr, user, ids, context=context)
-
- if self._name in self.pool._store_function.keys():
- list_store = self.pool._store_function[self._name]
- for tuple_fn in list_store:
- flag = False
- if not tuple_fn[3]:
- flag = True
- for field in tuple_fn[3]:
- if field in vals.keys():
- flag = True
- break
- if flag:
- id_change = self._store_get_ids(cr, user, ids[0], tuple_fn, context)
- self._store_set_values(cr, user, ids[0], tuple_fn, id_change, context)
-
return True
#
for f in self._columns.keys(): # + self._inherit_fields.keys():
if not f in vals:
default.append(f)
+
for f in self._inherit_fields.keys():
- if (not f in vals) and (not self._inherit_fields[f][0] in avoid_table):
+ if (not f in vals) and (self._inherit_fields[f][0] not in avoid_table):
default.append(f)
if len(default):
- vals.update(self.default_get(cr, user, default, context))
+ default_values = self.default_get(cr, user, default, context)
+ for dv in default_values:
+ if dv in self._columns and self._columns[dv]._type == 'many2many':
+ if default_values[dv] and isinstance(default_values[dv][0], (int, long)):
+ default_values[dv] = [(6, 0, default_values[dv])]
+
+ vals.update(default_values)
tocreate = {}
for v in self._inherits:
tocreate[table][v] = vals[v]
del vals[v]
- cr.execute("SELECT nextval('"+self._sequence+"')")
+ # Try-except added to catch creation attempts on records whose fields are all read-only.
+ # Example: a dashboard whose fields are all read-only (backed by database views).
+ try:
+ cr.execute("SELECT nextval('"+self._sequence+"')")
+ except:
+ raise except_orm(_('UserError'),
+ _('You cannot perform this operation.'))
+
id_new = cr.fetchone()[0]
for table in tocreate:
id = self.pool.get(table).create(cr, user, tocreate[table])
upd0 += ','+self._inherits[table]
- upd1 += ',%d'
+ upd1 += ',%s'
upd2.append(id)
for field in vals:
% (vals[field], field))
if self._log_access:
upd0 += ',create_uid,create_date'
- upd1 += ',%d,now()'
+ upd1 += ',%s,now()'
upd2.append(user)
cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2))
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
else:
parent = vals.get(self._parent_name, False)
if parent:
- cr.execute('select parent_left from '+self._table+' where id=%d', (parent,))
- pleft = cr.fetchone()[0]
+ cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
+ pleft_old = None
+ result_p = cr.fetchall()
+ for (pleft,) in result_p:
+ if not pleft:
+ break
+ pleft_old = pleft
+ if not pleft_old:
+ cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
+ pleft_old = cr.fetchone()[0]
+ pleft = pleft_old
else:
cr.execute('select max(parent_right) from '+self._table)
pleft = cr.fetchone()[0] or 0
- cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%d', (pleft,))
- cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%d', (pleft,))
- cr.execute('update '+self._table+' set parent_left=%d,parent_right=%d where id=%d', (pleft+1,pleft+2,id_new))
+ cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
+ cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
+ cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1,pleft+2,id_new))
+
+ result = self._store_get_values(cr, user, [id_new], vals.keys(), context)
+ for order, object, ids, fields in result:
+ self.pool.get(object)._store_set_values(cr, user, ids, fields, context)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_create(user, self._name, id_new, cr)
- self._update_function_stored(cr, user, [id_new], context=context)
- if self._name in self.pool._store_function.keys():
- list_store = self.pool._store_function[self._name]
- for tuple_fn in list_store:
- id_change = self._store_get_ids(cr, user, id_new, tuple_fn, context)
- self._store_set_values(cr, user, id_new, tuple_fn, id_change, context)
-
return id_new
- def _store_get_ids(self, cr, uid, ids, tuple_fn, context):
- parent_id = getattr(self.pool.get(tuple_fn[0]), tuple_fn[4].func_name)(cr, uid, [ids])
- return parent_id
-
- def _store_set_values(self, cr, uid, ids, tuple_fn, parent_id, context):
- name = tuple_fn[1]
- table = tuple_fn[0]
- args = {}
- vals_tot = getattr(self.pool.get(table), tuple_fn[2])(cr, uid, parent_id, name, args, context)
- write_dict = {}
- for id in vals_tot.keys():
- write_dict[name] = vals_tot[id]
- self.pool.get(table).write(cr, uid, [id], write_dict)
- return True
+ def _store_get_values(self, cr, uid, ids, fields, context):
+ result = {}
+ fncts = self.pool._store_function.get(self._name, [])
+ for fnct in range(len(fncts)):
+ result.setdefault(fncts[fnct][0], {})
+ ids2 = fncts[fnct][2](self,cr, uid, ids, context)
+ for id in filter(None, ids2):
+ result[fncts[fnct][0]].setdefault(id, [])
+ result[fncts[fnct][0]][id].append(fnct)
+ result2 = []
+ for object in result:
+ k2 = {}
+ for id,fnct in result[object].items():
+ k2.setdefault(tuple(fnct), [])
+ k2[tuple(fnct)].append(id)
+ for fnct,id in k2.items():
+ result2.append((fncts[fnct[0]][4],object,id,map(lambda x: fncts[x][1], fnct)))
+ result2.sort()
+ return result2
+
+ def _store_set_values(self, cr, uid, ids, fields, context):
+ todo = {}
+ keys = []
+ for f in fields:
+ if self._columns[f]._multi not in keys:
+ keys.append(self._columns[f]._multi)
+ todo.setdefault(self._columns[f]._multi, [])
+ todo[self._columns[f]._multi].append(f)
+ for key in keys:
+ val = todo[key]
+ if key:
+ result = self._columns[val[0]].get(cr, self, ids, val, uid, context=context)
+ for id,value in result.items():
+ upd0 = []
+ upd1 = []
+ for v in value:
+ if v not in val:
+ continue
+ if self._columns[v]._type in ('many2one', 'one2one'):
+ try:
+ value[v] = value[v][0]
+ except:
+ pass
+ upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
+ upd1.append(self._columns[v]._symbol_set[1](value[v]))
+ upd1.append(id)
+ cr.execute('update "' + self._table + '" set ' + \
+ string.join(upd0, ',') + ' where id = %s', upd1)
- def _update_function_stored(self, cr, user, ids, context=None):
- if not context:
- context = {}
- f = filter(lambda a: isinstance(self._columns[a], fields.function) \
- and self._columns[a].store, self._columns)
- if f:
- result = self.read(cr, user, ids, fields=f, context=context)
- for res in result:
- upd0 = []
- upd1 = []
- for field in res:
- if field not in f:
- continue
- value = res[field]
- if self._columns[field]._type in ('many2one', 'one2one'):
- try:
- value = res[field][0]
- except:
- value = res[field]
- upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
- upd1.append(self._columns[field]._symbol_set[1](value))
- upd1.append(res['id'])
- cr.execute('update "' + self._table + '" set ' + \
- string.join(upd0, ',') + ' where id = %d', upd1)
+ else:
+ for f in val:
+ result = self._columns[f].get(cr, self, ids, f, uid, context=context)
+ for id,value in result.items():
+ if self._columns[f]._type in ('many2one', 'one2one'):
+ try:
+ value = value[0]
+ except:
+ pass
+ cr.execute('update "' + self._table + '" set ' + \
+ '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value),id))
return True
#
return []
if isinstance(ids, (int, long)):
ids = [ids]
- return [(r['id'], str(r[self._rec_name])) for r in self.read(cr, user, ids,
+ return [(r['id'], tools.ustr(r[self._rec_name])) for r in self.read(cr, user, ids,
[self._rec_name], context, load='_classic_write')]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=None):
res = self.name_get(cr, user, ids, context)
return res
- def copy(self, cr, uid, id, default=None, context=None):
+ def copy_data(self, cr, uid, id, default=None, context=None):
+ # Extracted from copy(): builds the value dict for a duplicate of record `id`
+ # and collects pending ir.translation rows, returning (data, trans_data) so
+ # callers can adjust values before create().  NOTE(review): this diff hunk
+ # elides unchanged context lines, so parts of the body are not visible here.
 if not context:
 context = {}
 if not default:
 default['state'] = self._defaults['state'](self, cr, uid, context)
 data = self.read(cr, uid, [id], context=context)[0]
 fields = self.fields_get(cr, uid)
+ trans_data=[]
+ # accumulates translation records harvested from copied one2many children;
+ # returned to copy(), which re-creates them for the new record
 for f in fields:
 ftype = fields[f]['type']
 # the lines are first duplicated using the wrong (old)
 # parent but then are reassigned to the correct one thanks
 # to the (4, ...)
- res.append((4, rel.copy(cr, uid, rel_id, context=context)))
+ # changed: one2many children are deep-copied via copy_data() and attached
+ # with (0, 0, vals) create-commands instead of linking (4, id) to copies
+ # made immediately; their translations are carried along in trans_data.
+ d,t = rel.copy_data(cr, uid, rel_id, context=context)
+ res.append((0, 0, d))
+ trans_data += t
 data[f] = res
 elif ftype == 'many2many':
 data[f] = [(6, 0, data[f])]
 trans_obj = self.pool.get('ir.translation')
+ # NOTE(review): trans_obj appears to be used by copy() below — confirm its
+ # scoping against the full file; surrounding context is elided in this hunk.
 trans_name=''
- trans_data=[]
 for f in fields:
 trans_flag=True
 if f in self._columns and self._columns[f].translate:
 for v in self._inherits:
 del data[self._inherits[v]]
+ return data, trans_data
+ def copy(self, cr, uid, id, default=None, context=None):
+ # Duplicate record `id`: gather copyable values (and harvested translations)
+ # via copy_data(), create the new record, then re-home each translation row
+ # onto the new record id.
+ data, trans_data = self.copy_data(cr, uid, id, default, context)
 new_id=self.create(cr, uid, data)
-
 for record in trans_data:
+ # drop the source translation's id and point res_id at the fresh record
 del record['id']
 record['res_id']=new_id
 trans_obj.create(cr,uid,record)
+ # NOTE(review): trans_obj is not defined in this visible body — it is
+ # presumably bound in context elided from this hunk; confirm in the full file.
-
 return new_id
def check_recursion(self, cr, uid, ids, parent=None):