#
#
-import time
-import datetime
import calendar
-import types
-import string
-import netsvc
-import re
-
+import copy
+import datetime
+import logging
import pickle
+import random
+import re
+import string
+import sys
+import time
+import traceback
+import types
import fields
+import netsvc
import tools
from tools.translate import _
import copy
import sys
-import copy
try:
from lxml import etree
except ImportError:
sys.stderr.write("ERROR: Import lxml module\n")
sys.stderr.write("ERROR: Try to install the python-lxml package\n")
- sys.exit(2)
-
from tools.config import config
-regex_order = re.compile('^([a-z0-9_]+( *desc| *asc)?( *, *|))+$', re.I)
+regex_order = re.compile('^(([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
+
+
+# Mapping from SQL "ON DELETE" action names to the one-letter codes that
+# PostgreSQL stores in pg_catalog.pg_constraint.confdeltype for foreign keys
+# (used below when comparing an existing FK constraint to the declared one).
+POSTGRES_CONFDELTYPES = {
+    'RESTRICT': 'r',
+    'NO ACTION': 'a',
+    'CASCADE': 'c',
+    'SET NULL': 'n',
+    'SET DEFAULT': 'd',
+}
def last_day_of_current_month():
+    """Return the last day of the current month as a 'YYYY-MM-DD' string."""
-    import datetime
-    import calendar
    today = datetime.date.today()
    last_day = str(calendar.monthrange(today.year, today.month)[1])
    return time.strftime('%Y-%m-' + last_day)
class browse_record(object):
+ logger = netsvc.Logger()
+
def __init__(self, cr, uid, id, table, cache, context=None, list_class = None, fields_process={}):
'''
table : the object (inherited from orm)
self._id = id
self._table = table
self._table_name = self._table._name
+ self.__logger = logging.getLogger(
+ 'osv.browse_record.' + self._table_name)
self._context = context
self._fields_process = fields_process
else:
return getattr(self._table, name)
else:
- logger = netsvc.Logger()
- logger.notifyChannel('orm', netsvc.LOG_ERROR, "Programming error: field '%s' does not exist in object '%s' !" % (name, self._table._name))
- return None
+ self.logger.notifyChannel("browse_record", netsvc.LOG_WARNING,
+ "Field '%s' does not exist in object '%s': \n%s" % (
+ name, self, ''.join(traceback.format_exc())))
+ raise KeyError("Field '%s' does not exist in object '%s'" % (
+ name, self))
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
if col._prefetch:
if not datas:
- # Where did those ids come from? Perhaps old entries in ir_model_data?
- raise except_orm('NoDataError', 'Field %s in %s%s'%(name,self._table_name,str(ids)))
+                # Where did those ids come from? Perhaps old entries in ir_model_data?
+ self.__logger.warn("No datas found for ids %s in %s",
+ ids, self)
+ raise KeyError('Field %s not found in %s'%(name,self))
# create browse records for 'remote' objects
for data in datas:
if len(str(data['id']).split('-')) > 1:
data['id'] = int(str(data['id']).split('-')[0])
+ new_data = {}
for n, f in ffields:
if f._type in ('many2one', 'one2one'):
if data[n]:
else:
ids2 = data[n]
if ids2:
- data[n] = browse_record(self._cr, self._uid, ids2, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
+                            # FIXME: this happens when an _inherits object
+                            # overwrites a field of its parent. Needs
+                            # testing to be sure we got the right
+                            # object and not the parent one.
+ if not isinstance(ids2, browse_record):
+ new_data[n] = browse_record(self._cr,
+ self._uid, ids2, obj, self._cache,
+ context=self._context,
+ list_class=self._list_class,
+ fields_process=self._fields_process)
else:
- data[n] = browse_null()
+ new_data[n] = browse_null()
else:
- data[n] = browse_null()
+ new_data[n] = browse_null()
elif f._type in ('one2many', 'many2many') and len(data[n]):
- data[n] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(f._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in data[n]], self._context)
- self._data[data['id']].update(data)
+ new_data[n] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(f._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in data[n]], self._context)
+ elif f._type in ('reference'):
+ if data[n]:
+ if isinstance(data[n], browse_record):
+ new_data[n] = data[n]
+ else:
+ ref_obj, ref_id = data[n].split(',')
+ ref_id = long(ref_id)
+ obj = self._table.pool.get(ref_obj)
+ compids = False
+ new_data[n] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
+ else:
+ new_data[n] = browse_null()
+ else:
+ new_data[n] = data[n]
+ self._data[data['id']].update(new_data)
if not name in self._data[self._id]:
#how did this happen?
- logger = netsvc.Logger()
- logger.notifyChannel("browse_record", netsvc.LOG_ERROR,"Ffields: %s, datas: %s"%(str(fffields),str(datas)))
- logger.notifyChannel("browse_record", netsvc.LOG_ERROR,"Data: %s, Table: %s"%(str(self._data[self._id]),str(self._table)))
- raise AttributeError(_('Unknown attribute %s in %s ') % (str(name),self._table_name))
+ self.logger.notifyChannel("browse_record", netsvc.LOG_ERROR,
+ "Ffields: %s, datas: %s"%(fffields, datas))
+ self.logger.notifyChannel("browse_record", netsvc.LOG_ERROR,
+ "Data: %s, Table: %s"%(self._data[self._id], self._table))
+ raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
return self._data[self._id][name]
    def __getattr__(self, name):
+        # Attribute access is delegated to __getitem__; a missing field
+        # raises KeyError there, which is converted to AttributeError so
+        # that getattr()/hasattr() keep their normal semantics.
-# raise an AttributeError exception.
-        return self[name]
+        try:
+            return self[name]
+        except KeyError, e:
+            raise AttributeError(e)
def __contains__(self, name):
return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)
f_type = (type_dict[type(f)], type_dict[type(f)])
elif isinstance(f, fields.float):
if f.digits:
- f_type = ('numeric', 'NUMERIC(%d,%d)' % (f.digits[0], f.digits[1]))
+ f_type = ('numeric', 'NUMERIC')
else:
f_type = ('float8', 'DOUBLE PRECISION')
elif isinstance(f, (fields.char, fields.reference)):
elif isinstance(f.selection, list) and isinstance(f.selection[0][0], int):
f_size = -1
else:
- f_size = (hasattr(f, 'size') and f.size) or 16
+ f_size = getattr(f, 'size', None) or 16
if f_size == -1:
f_type = ('int4', 'INTEGER')
f_type = (type_dict[t], type_dict[t])
elif isinstance(f, fields.function) and f._type == 'float':
if f.digits:
- f_type = ('numeric', 'NUMERIC(%d,%d)' % (f.digits[0], f.digits[1]))
+ f_type = ('numeric', 'NUMERIC')
else:
f_type = ('float8', 'DOUBLE PRECISION')
elif isinstance(f, fields.function) and f._type == 'selection':
CONCURRENCY_CHECK_FIELD = '__last_update'
- def read_group(self, cr, user, ids, fields, groupby, context=None):
- context = context or {}
- if not ids:return
- if fields[groupby]['type'] not in ('many2one','date','datetime'):
- raise Exception(_("Type Not supported for Group By: %s :Only many2one,date and datetime are supported ") %(fields[groupby]['type'],))
- qu1 = ' where id in (' + ','.join([str(id) for id in ids]) + ')'
- qu2 = ''
- # construct a clause for the rules :
- d1, d2 = self.pool.get('ir.rule').domain_get(cr, user, self._name)
- if d1:
- qu1 = qu1 and qu1+' and '+d1 or ' where '+d1
- qu2 += d2
-
- float_int_fields = list(field_name for field_name,values in fields.items() if values['type'] in ('float','integer'))
- sum = {}
-
- cr.execute('select '+groupby+' from ' + self._table +qu1+' group by '+groupby+qu2)
- parent_res = cr.fetchall()
- try:
- parent_res = [(m[0],m[1]) for m in parent_res if m[0]]
- except:
- parent_res = [(m[0],) for m in parent_res if m[0]]
- groupby_ids = map(lambda x:x[0],parent_res)
-
- groupby_name = {}
- child_ids_dict = {}
-
- if fields[groupby]['type'] == 'many2one':
- groupby_name = dict(self.pool.get(fields[groupby]['relation']).name_get(cr,user,groupby_ids,context))
- chqu1 =' where id in (' + ','.join([str(id) for id in ids]) + ') and '
-
- # get child ids
- for parent_id in groupby_ids:
- cr.execute('select id from '+self._table+chqu1 +groupby+" = '%s'"%(parent_id,))
- child_ids = cr.fetchall()
- for val in child_ids:
- if parent_id not in child_ids_dict:
- child_ids_dict[parent_id] = [val[0]]
- else:
- child_ids_dict[parent_id].append(val[0])
- #create [{},{}] for parent ids i.e for group by field
+    def view_init(self, cr , uid , fields_list, context=None):
+        """Hook called when a view on the object is opened.
+
+        Override this method to do specific things when a view on the
+        object is opened; the default implementation does nothing.
+
+        :param cr: database cursor
+        :param uid: current user id
+        :param fields_list: field names present in the view
+        :param context: optional context dictionary
+        """
+        pass
- result = []
- if fields[groupby]['type'] in ('date','datetime'):
- curr_date = datetime.date.today()
- yesterday = (curr_date - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
- lastweek = (curr_date + datetime.timedelta(weeks=-1)).strftime('%Y-%m-%d')
- date_result = {'Today':{'group_child':[]},'Yesterday':{'group_child':[]},
- 'LastWeek':{'group_child':[]},'Old':{'group_child':[]}}
- for x in parent_res:
- db_date = x[0][:10]
- if db_date == curr_date.strftime('%Y-%m-%d'):
- date_format = 'Today'
- elif db_date == yesterday:
- date_format = 'Yesterday'
- elif (db_date < yesterday) and (db_date >= lastweek):
- date_format = 'LastWeek'
- else:
- date_format = 'Old'
- date_result[date_format].update({'id':None,groupby:date_format})
- date_result[date_format]['group_child'] += child_ids_dict[x[0]]
- float_int_sum = self.read(cr, user, child_ids_dict[x[0]], float_int_fields, context)
- for value in float_int_sum:
- for field,val in value.items():
- if field == 'id':continue
- if field not in sum:
- sum[field] = 0.0
- sum[field] += val
- for k,v in sum.items():
- date_result[date_format][k] = v
- sum = {}
- for key,val in date_result.items():
- if len(date_result[key]) == 1:
- del date_result[key]
- continue
- for field in fields.keys():
- if field not in val:
- val[field] = False
- if key in ('Today','Yesterday') and len(result):
- result.insert(0,val)
- else:
- result.insert(-1,val)
- return result
- else:
- for x in parent_res:
- parent_val_dict = {'id':str(x[0])+':Gpby',groupby:(x[0],groupby_name[x[0]]),'group_child':child_ids_dict[x[0]]}
- float_int_sum = self.read(cr, user, child_ids_dict[x[0]], float_int_fields, context)
- for value in float_int_sum:
- for field,val in value.items():
- if field == 'id':continue
- if field not in sum:
- sum[field] = 0.0
- sum[field] += val
- for k,v in sum.items():
- parent_val_dict[k] = v
- sum = {}
- for field in fields.keys():
- if field not in parent_val_dict:
- parent_val_dict[field] = False
- result.append(parent_val_dict)
- return result
+    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None):
+        """Default read_group: grouping is not supported on this object.
+
+        Concrete models (see the orm-level read_group) provide the real
+        implementation.  Raise NotImplementedError instead of a bare
+        string: raising a string object is a TypeError on Python >= 2.6.
+        """
+        raise NotImplementedError(_('The read_group method is not implemented on this object !'))
def _field_create(self, cr, context={}):
cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
model_id = cr.fetchone()[0]
if 'module' in context:
name_id = 'model_'+self._name.replace('.','_')
- cr.execute('select * from ir_model_data where name=%s and res_id=%s', (name_id,model_id))
+ cr.execute('select * from ir_model_data where name=%s and res_id=%s and module=%s', (name_id,model_id,context['module']))
if not cr.rowcount:
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
(name_id, context['module'], 'ir.model', model_id)
'ttype': f._type,
'relation': f._obj or 'NULL',
'view_load': (f.view_load and 1) or 0,
- 'select_level': str(f.select or 0),
+ 'select_level': tools.ustr(f.select or 0),
'readonly':(f.readonly and 1) or 0,
'required':(f.required and 1) or 0,
'selectable' : (f.selectable and 1) or 0,
+ 'relation_field': (f._type=='one2many' and isinstance(f,fields.one2many)) and f._fields_id or '',
}
+            # When it's a custom field, it does not contain f.select
+ if context.get('field_state','base') == 'manual':
+ if context.get('field_name','') == k:
+ vals['select_level'] = context.get('select','0')
+                # Set the value so the problem does NOT occur next time
+ else:
+ vals['select_level'] = cols[k]['select_level']
+
if k not in cols:
cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
id = cr.fetchone()[0]
vals['id'] = id
cr.execute("""INSERT INTO ir_model_fields (
id, model_id, model, name, field_description, ttype,
- relation,view_load,state,select_level
+ relation,view_load,state,select_level,relation_field
) VALUES (
- %s,%s,%s,%s,%s,%s,%s,%s,%s,%s
+ %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
)""", (
id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
vals['relation'], bool(vals['view_load']), 'base',
- vals['select_level']
+ vals['select_level'],vals['relation_field']
))
if 'module' in context:
name1 = 'field_' + self._table + '_' + k
cr.commit()
cr.execute("""UPDATE ir_model_fields SET
model_id=%s, field_description=%s, ttype=%s, relation=%s,
- view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s
+ view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s
WHERE
model=%s AND name=%s""", (
vals['model_id'], vals['field_description'], vals['ttype'],
vals['relation'], bool(vals['view_load']),
- vals['select_level'], bool(vals['readonly']),bool(vals['required']),bool(vals['selectable']),vals['model'], vals['name']
+ vals['select_level'], bool(vals['readonly']),bool(vals['required']),bool(vals['selectable']),vals['relation_field'],vals['model'], vals['name']
))
continue
cr.commit()
else:
module, xml_id = current_module, line[i]
id = ir_model_data_obj._get_id(cr, uid, module, xml_id)
+
res_res_id = ir_model_data_obj.read(cr, uid, [id],
['res_id'])
if res_res_id:
except Exception, e:
import psycopg2
import osv
+ cr.rollback()
if isinstance(e,psycopg2.IntegrityError):
msg= _('Insertion Failed! ')
for key in self.pool._sql_error.keys():
if isinstance(e, osv.orm.except_orm ):
msg = _('Insertion Failed! ' + e[1])
return (-1, res, 'Line ' + str(counter) +' : ' + msg, '' )
+            # Re-raise the uncaught exception
+ raise
for lang in translate:
context2 = context.copy()
context2['lang'] = lang
self._invalids.clear()
    def default_get(self, cr, uid, fields_list, context=None):
+        """ Set default values for the object's fields.
+
+        Returns a dict of {field_name: default_value}.
+
+        :param fields_list: the fields for which the object doesn't have
+                            any value yet and for which default values
+                            need to be provided.  If fields outside this
+                            list are returned, the user-provided values
+                            will be overwritten.
+        """
        return {}
def perm_read(self, cr, user, ids, context=None, details=True):
if fields and f not in fields:
continue
res[f] = {'type': self._columns[f]._type}
- for arg in ('string', 'readonly', 'states', 'size', 'required',
- 'change_default', 'translate', 'help', 'select', 'selectable'):
+ for arg in ('string', 'readonly', 'states', 'size', 'required', 'group_operator',
+ 'change_default', 'translate', 'help', 'select', 'selectable','parent_field'):
if getattr(self._columns[f], arg):
res[f][arg] = getattr(self._columns[f], arg)
if not read_access:
res[f]['readonly'] = True
res[f]['states'] = {}
for arg in ('digits', 'invisible','filters'):
- if hasattr(self._columns[f], arg) \
- and getattr(self._columns[f], arg):
+ if getattr(self._columns[f], arg, None):
res[f][arg] = getattr(self._columns[f], arg)
#TODO: optimize
if column._domain and not isinstance(column._domain, (str, unicode)):
dom = column._domain
dom += eval(node.get('domain','[]'), {'uid':user, 'time':time})
+ context.update(eval(node.get('context','{}')))
attrs['selection'] = self.pool.get(relation).name_search(cr, user, '', dom, context=context)
if (node.get('required') and not int(node.get('required'))) or not column.required:
attrs['selection'].append((False,''))
button.set('readonly', str(int(not can_click)))
arch = etree.tostring(node, encoding="utf-8").replace('\t', '')
- fields = self.fields_get(cr, user, fields_def.keys(), context)
+
+ #code for diagram view.
+ fields={}
+ if node.tag=='diagram':
+ if node.getchildren()[0].tag=='node':
+ node_fields=self.pool.get(node.getchildren()[0].get('object')).fields_get(cr, user, fields_def.keys(), context)
+ if node.getchildren()[1].tag=='arrow':
+ arrow_fields = self.pool.get(node.getchildren()[1].get('object')).fields_get(cr, user, fields_def.keys(), context)
+ for key,value in node_fields.items():
+ fields[key]=value
+ for key,value in arrow_fields.items():
+ fields[key]=value
+ else:
+ fields = self.fields_get(cr, user, fields_def.keys(), context)
+
for field in fields_def:
if field == 'id':
# sometime, the view may containt the (invisible) field 'id' needed for a domain (when 2 objects have cross references)
msg += "\n\nEither you wrongly customised this view, or some modules bringing those views are not compatible with your current data model"
netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
raise except_orm('View error', msg)
-
return arch, fields
def __get_default_calendar_view(self):
return arch
+    def __get_default_search_view(self, cr, uid, context={}):
+        """Build a default search view from the object's form view.
+
+        Every field flagged select=1 in the form view arch is copied into
+        a <group> inside a <search> element.  Returns the resulting arch
+        as a UTF-8 string with tab characters stripped.
+
+        NOTE(review): mutable default argument 'context={}' is shared
+        across calls — confirm callers always pass a context.
+        """
+
+        def encode(s):
+            # lxml expects byte strings, not unicode objects.
+            if isinstance(s, unicode):
+                return s.encode('utf8')
+            return s
+
+        view = self.fields_view_get(cr, uid, False, 'form', context)
+
+        root = etree.fromstring(encode(view['arch']))
+        res = etree.XML("<search string='%s'></search>" % root.get("string", ""))
+        node = etree.Element("group")
+        res.append(node)
+
+        # Reuse the fields the form view marks as searchable (select=1).
+        fields = root.xpath("//field[@select=1]")
+        for field in fields:
+            node.append(field)
+
+        return etree.tostring(res, encoding="utf-8").replace('\t', '')
+
#
# if view_id, view_type is not required
#
if pos == 'replace':
parent = node.getparent()
if parent is None:
- src = copy.deepcopy(node2.getchildren()[0])
+ src = copy.deepcopy(node2[0])
else:
for child in node2:
node.addprevious(child)
inherit_id IS NULL
ORDER BY priority''', (self._name, view_type))
sql_res = cr.fetchone()
- if not sql_res and view_type == 'search':
- cr.execute('''SELECT
- arch,name,field_parent,id,type,inherit_id
- FROM
- ir_ui_view
- WHERE
- model=%s AND
- type=%s AND
- inherit_id IS NULL
- ORDER BY priority''', (self._name, 'form'))
- sql_res = cr.fetchone()
+
if not sql_res:
break
+
ok = sql_res[5]
view_id = ok or sql_res[3]
model = False
result['name'] = sql_res[1]
result['field_parent'] = sql_res[2] or False
else:
+
# otherwise, build some kind of default view
if view_type == 'form':
res = self.fields_get(cr, user, context=context)
if res[x]['type'] == 'text':
xml += "<newline/>"
xml += "</form>"
+
elif view_type == 'tree':
_rec_name = self._rec_name
if _rec_name not in self._columns:
xml = '<?xml version="1.0" encoding="utf-8"?>' \
'<tree string="%s"><field name="%s"/></tree>' \
% (self._description, self._rec_name)
+
elif view_type == 'calendar':
xml = self.__get_default_calendar_view()
+
+ elif view_type == 'search':
+ xml = self.__get_default_search_view(cr, user, context)
+
else:
xml = '<?xml version="1.0"?>' # what happens here, graph case?
- # raise except_orm(_('Invalid Architecture!'),_("There is no view of type '%s' defined for the structure!") % view_type)
+ raise except_orm(_('Invalid Architecture!'),_("There is no view of type '%s' defined for the structure!") % view_type)
result['arch'] = etree.fromstring(encode(xml))
result['name'] = 'default'
result['field_parent'] = False
if context and context.get('active_id',False):
data_menu = self.pool.get('ir.ui.menu').browse(cr, user, context['active_id'], context).action
if data_menu:
- act_id = int(data_menu.split(',')[1])
+ act_id = data_menu.id
if act_id:
data_action = self.pool.get('ir.actions.act_window').browse(cr, user, [act_id], context)[0]
- result['submenu'] = hasattr(data_action,'menus') and data_action.menus or False
+ result['submenu'] = getattr(data_action,'menus', False)
if toolbar:
def clean(x):
x = x[2]
'action': resaction,
'relate': resrelate
}
+ if result['type']=='form' and result['arch'].count("default_focus")>1:
+ msg = "Form View contain more than one default_focus attribute"
+ netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
+ raise except_orm('View Error !',msg)
return result
_view_look_dom_arch = __view_look_dom_arch
return id_new
def default_get(self, cr, uid, fields_list, context=None):
+ self.view_init(cr, uid, fields_list, context)
if not context:
context = {}
value = {}
_table = None
_protected = ['read','write','create','default_get','perm_read','unlink','fields_get','fields_view_get','search','name_get','distinct_field_get','name_search','copy','import_data','search_count', 'exists']
+    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None):
+        """Return aggregated records grouped by `groupby`.
+
+        :param domain: search domain used to filter the records
+        :param fields: field names to read/aggregate (defaults to all
+                       columns); only float/integer fields are aggregated
+        :param groupby: field name to group on, or a list of field names
+                        (only the first element is used at this level)
+        :param offset: optional SQL OFFSET
+        :param limit: optional SQL LIMIT
+        :return: list of dicts, one per group, each holding the grouped
+                 value, the aggregated numeric fields, a '__domain' entry
+                 selecting the group's records and a '__context' entry
+                 carrying the remaining group_by levels
+        """
+        groupby_list = groupby
+        if isinstance(groupby, list):
+            groupby = groupby[0]
+        context = context or {}
+        # Check read access rights before touching the database.
+        self.pool.get('ir.model.access').check(cr, uid, self._name, 'read', context=context)
+        if not fields:
+            fields = self._columns.keys()
+
+        (where_clause, where_params, tables) = self._where_calc(cr, uid, domain, context=context)
+        # Merge in the record-rule ('ir.rule') restrictions.
+        dom = self.pool.get('ir.rule').domain_get(cr, uid, self._name, 'read', context=context)
+        where_clause = where_clause + dom[0]
+        where_params = where_params + dom[1]
+        for t in dom[2]:
+            if t not in tables:
+                tables.append(t)
+
+        # Take care of adding join(s) if groupby is an '_inherits'ed field
+        tables, where_clause = self._inherits_join_calc(groupby,tables,where_clause)
+
+        if len(where_clause):
+            where_clause = ' where '+string.join(where_clause, ' and ')
+        else:
+            where_clause = ''
+        limit_str = limit and ' limit %d' % limit or ''
+        offset_str = offset and ' offset %d' % offset or ''
+
+        fget = self.fields_get(cr, uid, fields)
+        float_int_fields = filter(lambda x: fget[x]['type'] in ('float','integer'), fields)
+        # NOTE(review): 'sum' shadows the builtin and is never used below.
+        sum = {}
+
+        # Date/datetime fields are grouped by month ('yyyy-mm').
+        group_by = groupby
+        if fget.get(groupby,False) and fget[groupby]['type'] in ('date','datetime'):
+            flist = "to_char(%s,'yyyy-mm') as %s "%(groupby,groupby)
+            groupby = "to_char(%s,'yyyy-mm')"%(groupby)
+        else:
+            flist = groupby
+
+        # Only aggregate classic (directly stored) numeric columns.
+        fields_pre = [f for f in float_int_fields if
+                   f == self.CONCURRENCY_CHECK_FIELD
+                   or (f in self._columns and getattr(self._columns[f], '_classic_write'))]
+        for f in fields_pre:
+            if f not in ['id','sequence']:
+                # Per-field aggregation operator, 'sum' by default.
+                operator = fget[f].get('group_operator','sum')
+                flist += ','+operator+'('+f+') as '+f
+
+        cr.execute('select min(%s.id) as id,' % self._table + flist + ' from ' + ','.join(tables) + where_clause + ' group by '+ groupby + limit_str + offset_str, where_params)
+        alldata = {}
+        groupby = group_by
+        for r in cr.dictfetchall():
+            for fld,val in r.items():
+                if val == None:r[fld] = False
+            alldata[r['id']] = r
+            del r['id']
+        # Re-read the groupby field through orm.read() to get display values.
+        data = self.read(cr, uid, alldata.keys(), [groupby], context=context)
+        # NOTE(review): 'today' is assigned but never used afterwards.
+        today = datetime.date.today()
+
+        for d in data:
+            d['__domain'] = [(groupby,'=',alldata[d['id']][groupby] or False)] + domain
+            d['__context'] = {'group_by':groupby_list[1:]}
+            if fget.has_key(groupby):
+                if d[groupby] and fget[groupby]['type'] in ('date','datetime'):
+                    # Expand a month group into a [first day, last day] domain.
+                    dt = datetime.datetime.strptime(alldata[d['id']][groupby][:7],'%Y-%m')
+                    days = calendar.monthrange(dt.year, dt.month)[1]
+
+                    d[groupby] = datetime.datetime.strptime(d[groupby][:10],'%Y-%m-%d').strftime('%B %Y')
+                    d['__domain'] = [(groupby,'>=',alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-01','%Y-%m-%d').strftime('%Y-%m-%d') or False),\
+                                 (groupby,'<=',alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-' + str(days),'%Y-%m-%d').strftime('%Y-%m-%d') or False)] + domain
+                elif fget[groupby]['type'] == 'many2one':
+                    # Keep only the display name from the (id, name) pair.
+                    d[groupby] = d[groupby] and ((type(d[groupby])==type(1)) and d[groupby] or d[groupby][1]) or ''
+
+            del alldata[d['id']][groupby]
+            d.update(alldata[d['id']])
+            del d['id']
+        return data
+
+    def _inherits_join_calc(self, field, tables, where_clause):
+        """Add the table(s) and join condition(s) needed to reach `field`
+        when it comes from an '_inherits' parent model.
+
+        @param field: field name, possibly inherited through '_inherits'
+        @param tables: list of table._table names enclosed in double quotes
+                       as returned by _where_calc(); extended in place
+        @param where_clause: list of SQL conditions; join clauses appended
+        @return: the updated (tables, where_clause) pair
+        """
+        current_table = self
+        # Walk up the '_inherits' chain until the field is a real column
+        # of current_table (not merely inherited).
+        while field in current_table._inherit_fields and not field in current_table._columns:
+            parent_table = self.pool.get(current_table._inherit_fields[field][0])
+            parent_table_name = parent_table._table
+            if '"%s"'%parent_table_name not in tables:
+                tables.append('"%s"'%parent_table_name)
+                where_clause.append('(%s.%s = %s.id)' % (current_table._table, current_table._inherits[parent_table._name], parent_table_name))
+            current_table = parent_table
+        return (tables, where_clause)
+
def _parent_store_compute(self, cr):
logger = netsvc.Logger()
logger.notifyChannel('orm', netsvc.LOG_INFO, 'Computing parent left and right for table %s...' % (self._table, ))
create = False
todo_end = []
self._field_create(cr, context=context)
- if not hasattr(self, "_auto") or self._auto:
+ if getattr(self, '_auto', True):
cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname='%s'" % self._table)
if not cr.rowcount:
- cr.execute("CREATE TABLE \"%s\" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITH OIDS" % self._table)
+ cr.execute("CREATE TABLE \"%s\" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS" % self._table)
+ cr.execute("COMMENT ON TABLE \"%s\" IS '%s'" % (self._table, self._description.replace("'","''")))
create = True
cr.commit()
if self._parent_store:
# iterate on the "object columns"
todo_update_store = []
+ update_custom_fields = context.get('update_custom_fields', False)
for k in self._columns:
if k in ('id', 'write_uid', 'write_date', 'create_uid', 'create_date'):
continue
#raise _('Can not define a column %s. Reserved keyword !') % (k,)
+ #Not Updating Custom fields
+ if k.startswith('x_') and not update_custom_fields:
+ continue
f = self._columns[k]
if isinstance(f, fields.one2many):
cr.execute("SELECT relname FROM pg_class WHERE relkind='r' AND relname=%s", (f._obj,))
+
+ if self.pool.get(f._obj):
+ if f._fields_id not in self.pool.get(f._obj)._columns.keys():
+ if not self.pool.get(f._obj)._inherits or (f._fields_id not in self.pool.get(f._obj)._inherit_fields.keys()):
+ raise except_orm('Programming Error', ("There is no reference field '%s' found for '%s'") % (f._fields_id,f._obj,))
+
if cr.fetchone():
cr.execute("SELECT count(1) as c FROM pg_class c,pg_attribute a WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid", (f._obj, f._fields_id))
res = cr.fetchone()[0]
elif isinstance(f, fields.many2many):
cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname=%s", (f._rel,))
if not cr.dictfetchall():
- #FIXME: Remove this try/except
- try:
- ref = self.pool.get(f._obj)._table
- except AttributeError:
- ref = f._obj.replace('.', '_')
+ if not self.pool.get(f._obj):
+ raise except_orm('Programming Error', ('There is no reference available for %s') % (f._obj,))
+ ref = self.pool.get(f._obj)._table
+# ref = f._obj.replace('.', '_')
cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL REFERENCES "%s" ON DELETE CASCADE, "%s" INTEGER NOT NULL REFERENCES "%s" ON DELETE CASCADE) WITH OIDS' % (f._rel, f._id1, self._table, f._id2, ref))
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (f._rel, f._id1, f._rel, f._id1))
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (f._rel, f._id2, f._rel, f._id2))
+ cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (f._rel, self._table, ref))
cr.commit()
else:
cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size " \
# add the missing field
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
+ cr.execute("COMMENT ON COLUMN %s.%s IS '%s'" % (self._table, k, f.string.replace("'","''")))
# initialize it
if not create and k in self._defaults:
# and add constraints if needed
if isinstance(f, fields.many2one):
- #FIXME: Remove this try/except
- try:
- ref = self.pool.get(f._obj)._table
- except AttributeError:
- ref = f._obj.replace('.', '_')
+ if not self.pool.get(f._obj):
+ raise except_orm('Programming Error', ('There is no reference available for %s') % (f._obj,))
+ ref = self.pool.get(f._obj)._table
+# ref = f._obj.replace('.', '_')
# ir_actions is inherited so foreign key doesn't work on it
if ref != 'ir_actions':
cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (self._table, k, ref, f.ondelete))
f_pg_type = f_pg_def['typname']
f_pg_size = f_pg_def['size']
f_pg_notnull = f_pg_def['attnotnull']
- if isinstance(f, fields.function) and not f.store and (not hasattr(f,'nodrop') or not f.nodrop):
+ if isinstance(f, fields.function) and not f.store and\
+ not getattr(f, 'nodrop', False):
logger.notifyChannel('orm', netsvc.LOG_INFO, 'column %s (%s) in table %s removed: converted to a function !\n' % (k, f.string, self._table))
- cr.execute('ALTER TABLE "%s" DROP COLUMN "%s"'% (self._table, k))
+ cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE'% (self._table, k))
cr.commit()
f_obj_type = None
else:
cr.commit()
for c in casts:
if (f_pg_type==c[0]) and (f._type==c[1]):
- # Adding upcoming 6 lines to check whether only the size of the fields got changed or not.E.g. :(16,3) to (16,4)
- field_size_change = False
- if f_pg_type in ['int4','numeric','float8']:
- if f.digits:
- field_size = (65535 * f.digits[0]) + f.digits[0] + f.digits[1]
- if field_size != f_pg_size:
- field_size_change = True
-
- if f_pg_type != f_obj_type or field_size_change:
+ if f_pg_type != f_obj_type:
if f_pg_type != f_obj_type:
logger.notifyChannel('orm', netsvc.LOG_INFO, "column '%s' in table '%s' changed type to %s." % (k, self._table, c[1]))
- if field_size_change:
- logger.notifyChannel('orm', netsvc.LOG_INFO, "column '%s' in table '%s' changed in the size." % (k, self._table))
ok = True
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
"AND con.contype = 'f'", (self._table, ref, k, 'id'))
res = cr.dictfetchall()
if res:
- confdeltype = {
- 'RESTRICT': 'r',
- 'NO ACTION': 'a',
- 'CASCADE': 'c',
- 'SET NULL': 'n',
- 'SET DEFAULT': 'd',
- }
- if res[0]['confdeltype'] != confdeltype.get(f.ondelete.upper(), 'a'):
+ if res[0]['confdeltype'] != POSTGRES_CONFDELTYPES.get(f.ondelete.upper(), 'a'):
cr.execute('ALTER TABLE "' + self._table + '" DROP CONSTRAINT "' + res[0]['conname'] + '"')
cr.execute('ALTER TABLE "' + self._table + '" ADD FOREIGN KEY ("' + k + '") REFERENCES "' + ref + '" ON DELETE ' + f.ondelete)
cr.commit()
if not hasattr(self, '_log_access'):
# if not access is not specify, it is the same value as _auto
- self._log_access = not hasattr(self, "_auto") or self._auto
+ self._log_access = getattr(self, "_auto", True)
self._columns = self._columns.copy()
for store_field in self._columns:
f = self._columns[store_field]
+ if hasattr(f, 'digits_change'):
+ f.digits_change(cr)
if not isinstance(f, fields.function):
continue
if not f.store:
elif field['ttype'] == 'one2many':
self._columns[field['name']] = getattr(fields, field['ttype'])(field['relation'], field['relation_field'], **attrs)
elif field['ttype'] == 'many2many':
- import random
_rel1 = field['relation'].replace('.', '_')
_rel2 = field['model'].replace('.', '_')
_rel_name = 'x_%s_%s_%s_rel' %(_rel1, _rel2, field['name'])
fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
if fld_def._type in ('many2one', 'one2one'):
obj = self.pool.get(fld_def._obj)
- if not obj.search(cr, uid, [('id', '=', field_value)]):
+ if not obj.search(cr, uid, [('id', '=', field_value or False)]):
continue
if fld_def._type in ('many2many'):
obj = self.pool.get(fld_def._obj)
field_value = field_value2
value[field] = field_value
for key in context or {}:
- if key.startswith('default_'):
+ if key.startswith('default_') and (key[8:] in fields_list):
value[key[8:]] = context[key]
return value
#
# Update objects that uses this one to update their _inherits fields
#
+
def _inherits_reload_src(self):
for obj in self.pool.obj_pool.values():
if self._name in obj._inherits:
select = [ids]
else:
select = ids
-
select = map(lambda x: isinstance(x,dict) and x['id'] or x, select)
result = self._read_flat(cr, user, select, fields, context, load)
for r in result:
if not id_exist:
cr.execute('update "'+self._table+'" set "'+key+'"=NULL where "%s"=%s' %(key,''.join("'"+str(v)+"'")))
r[key] = ''
-
if isinstance(ids, (int, long, dict)):
return result and result[0] or False
return result
def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
if not context:
context = {}
+ #ids = map(lambda x:int(x), ids)
if not ids:
return []
- ids = map(lambda x:int(x), ids)
if fields_to_read == None:
fields_to_read = self._columns.keys()
# construct a clause for the rules :
- d1, d2 = self.pool.get('ir.rule').domain_get(cr, user, self._name)
-
+ d1, d2, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
# all inherited fields + all non inherited fields for which the attribute whose name is in load is True
fields_pre = [f for f in fields_to_read if
f == self.CONCURRENCY_CHECK_FIELD
return "date_trunc('second', %s) as %s" % (f, f)
if f == self.CONCURRENCY_CHECK_FIELD:
if self._log_access:
- return "COALESCE(write_date, create_date, now())::timestamp AS %s" % (f,)
+ return "COALESCE(%s.write_date, %s.create_date, now())::timestamp AS %s" % (self._table, self._table, f,)
return "now()::timestamp AS %s" % (f,)
if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
return 'length("%s") as "%s"' % (f, f)
return '"%s"' % (f,)
fields_pre2 = map(convert_field, fields_pre)
- for i in range(0, len(ids), cr.IN_MAX):
- sub_ids = ids[i:i+cr.IN_MAX]
+ order_by = self._parent_order or self._order
+ for sub_ids in cr.split_for_in_conditions(ids):
if d1:
- cr.execute('SELECT %s FROM \"%s\" WHERE id = ANY (%%s) AND %s ORDER BY %s' % \
- (','.join(fields_pre2 + ['id']), self._table, d1,
- self._order),[sub_ids,]+d2)
- if not cr.rowcount == len({}.fromkeys(sub_ids)):
+ cr.execute('SELECT %s FROM %s WHERE %s.id IN %%s AND %s ORDER BY %s' % \
+ (','.join(fields_pre2 + [self._table + '.id']), ','.join(tables), self._table, ' and '.join(d1),
+ order_by),[sub_ids,]+d2)
+ if cr.rowcount != len(sub_ids):
raise except_orm(_('AccessError'),
- _('You try to bypass an access rule (Document type: %s).') % self._description)
+ _('You try to bypass an access rule while reading (Document type: %s).') % self._description)
else:
- cr.execute('SELECT %s FROM \"%s\" WHERE id = ANY (%%s) ORDER BY %s' % \
- (','.join(fields_pre2 + ['id']), self._table,
- self._order), (sub_ids,))
+ cr.execute('SELECT %s FROM \"%s\" WHERE id IN %%s ORDER BY %s' %
+ (','.join(fields_pre2 + ['id']), self._table,
+ order_by), (sub_ids,))
res.extend(cr.dictfetchall())
else:
res = map(lambda x: {'id': x}, ids)
del r['id']
for record in res:
+ if not record[col]:# skip records whose parent row in the _inherits table is missing (deleted)
+ continue
record.update(res3[record[col]])
if col not in fields_to_read:
del record[col]
# all fields which need to be post-processed by a simple function (symbol_get)
fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
if fields_post:
- # maybe it would be faster to iterate on the fields then on res, so that we wouldn't need
- # to get the _symbol_get in each occurence
for r in res:
for f in fields_post:
r[f] = self._columns[f]._symbol_get(r[f])
res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
for pos in val:
for record in res:
+ if isinstance(res2[record['id']], str):res2[record['id']] = eval(res2[record['id']]) # TOCHECK: why do we get a string instead of a dict under python 2.6?
record[pos] = res2[record['id']][pos]
else:
for f in val:
if res and res[0]:
raise except_orm('ConcurrencyException', _('Records were modified in the meanwhile'))
+ def check_access_rule(self, cr, uid, ids, operation, context=None):
+ """Verify that ``operation`` is allowed for the user according to ir.rules.
+
+ @param operation: one of ``'read'``, ``'write'``, ``'unlink'``
+ @raise except_orm: if the current ir.rules do not permit this operation.
+ @return: ``None`` if the operation is allowed
+ """
+ where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
+ if where_clause:
+ where_clause = ' and ' + ' and '.join(where_clause)
+ for sub_ids in cr.split_for_in_conditions(ids):
+ cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
+ ' WHERE ' + self._table + '.id IN %s' + where_clause,
+ [sub_ids] + where_params)
+ if cr.rowcount != len(sub_ids):
+ raise except_orm(_('AccessError'),
+ _('Operation prohibited by access rules (Operation: %s, Document type: %s).')
+ % (operation, self._name))
+
def unlink(self, cr, uid, ids, context=None):
if not ids:
return True
# ids2 = [x[self._inherits[key]] for x in res]
# self.pool.get(key).unlink(cr, uid, ids2)
- d1, d2 = self.pool.get('ir.rule').domain_get(cr, uid, self._name)
- if d1:
- d1 = ' AND '+d1
-
- for i in range(0, len(ids), cr.IN_MAX):
- sub_ids = ids[i:i+cr.IN_MAX]
- str_d = string.join(('%s',)*len(sub_ids), ',')
- if d1:
- cr.execute('SELECT id FROM "'+self._table+'" ' \
- 'WHERE id IN ('+str_d+')'+d1, sub_ids+d2)
- if not cr.rowcount == len(sub_ids):
- raise except_orm(_('AccessError'),
- _('You try to bypass an access rule (Document type: %s).') % \
- self._description)
-
- if d1:
- cr.execute('delete from "'+self._table+'" ' \
- 'where id in ('+str_d+')'+d1, sub_ids+d2)
- else:
- cr.execute('delete from "'+self._table+'" ' \
- 'where id in ('+str_d+')', sub_ids)
+ self.check_access_rule(cr, uid, ids, 'unlink', context=context)
+ for sub_ids in cr.split_for_in_conditions(ids):
+ cr.execute('delete from ' + self._table + ' ' \
+ 'where id in %s', sub_ids)
for order, object, store_ids, fields in result_store:
- if object<>self._name:
+ if object != self._name:
obj = self.pool.get(object)
cr.execute('select id from '+obj._table+' where id in ('+','.join(map(str, store_ids))+')')
rids = map(lambda x: x[0], cr.fetchall())
upd1.append(user)
if len(upd0):
-
- d1, d2 = self.pool.get('ir.rule').domain_get(cr, user, self._name)
- if d1:
- d1 = ' and '+d1
-
- for i in range(0, len(ids), cr.IN_MAX):
- sub_ids = ids[i:i+cr.IN_MAX]
- ids_str = string.join(map(str, sub_ids), ',')
- if d1:
- cr.execute('SELECT id FROM "'+self._table+'" ' \
- 'WHERE id IN ('+ids_str+')'+d1, d2)
- if not cr.rowcount == len({}.fromkeys(sub_ids)):
- raise except_orm(_('AccessError'),
- _('You try to bypass an access rule (Document type: %s).') % \
- self._description)
- else:
- cr.execute('SELECT id FROM "'+self._table+'" WHERE id IN ('+ids_str+')')
- if not cr.rowcount == len({}.fromkeys(sub_ids)):
- raise except_orm(_('AccessError'),
- _('You try to write on an record that doesn\'t exist ' \
- '(Document type: %s).') % self._description)
- if d1:
- cr.execute('update "'+self._table+'" set '+string.join(upd0, ',')+' ' \
- 'where id in ('+ids_str+')'+d1, upd1+ d2)
- else:
- cr.execute('update "'+self._table+'" set '+string.join(upd0, ',')+' ' \
- 'where id in ('+ids_str+')', upd1)
+ self.check_access_rule(cr, user, ids, 'write', context=context)
+ for sub_ids in cr.split_for_in_conditions(ids):
+ cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
+ 'where id in %s', upd1 + [sub_ids])
if totranslate:
# TODO: optimize
for f in direct:
if self._columns[f].translate:
- src_trans = self.pool.get(self._name).read(cr,user,ids,[f])
- self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans[0][f])
+ src_trans = self.pool.get(self._name).read(cr,user,ids,[f])[0][f]
+ if not src_trans:
+ src_trans = vals[f]
+ # Insert the value into the DB before registering the translation
+ self.write(cr, user, ids, {f:vals[f]})
+ self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
# call the 'set' method of fields which are not classic_write
for table in self._inherits:
col = self._inherits[table]
nids = []
- for i in range(0, len(ids), cr.IN_MAX):
- sub_ids = ids[i:i+cr.IN_MAX]
- ids_str = string.join(map(str, sub_ids), ',')
+ for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
- 'where id in ('+ids_str+')', upd1)
+ 'where id in %s', (sub_ids,))
nids.extend([x[0] for x in cr.fetchall()])
v = {}
if self._inherits[v] not in vals:
tocreate[v] = {}
else:
- tocreate[v] = {self._inherits[v]:vals[self._inherits[v]]}
+ tocreate[v] = {'id' : vals[self._inherits[v]]}
(upd0, upd1, upd2) = ('', '', [])
upd_todo = []
for v in vals.keys():
for table in tocreate:
if self._inherits[table] in vals:
del vals[self._inherits[table]]
- id = self.pool.get(table).create(cr, user, tocreate[table])
+
+ record_id = tocreate[table].pop('id', None)
+
+ if record_id is None:
+ record_id = self.pool.get(table).create(cr, user, tocreate[table], context=context)
+ else:
+ self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=context)
+
upd0 += ','+self._inherits[table]
upd1 += ',%s'
- upd2.append(id)
+ upd2.append(record_id)
#Start : Set bool fields to be False if they are not touched(to make search more powerful)
bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
upd1 = upd1 + ',' + self._columns[field]._symbol_set[0]
upd2.append(self._columns[field]._symbol_set[1](vals[field]))
else:
- upd_todo.append(field)
+ if not isinstance(self._columns[field], fields.related):
+ upd_todo.append(field)
if field in self._columns \
and hasattr(self._columns[field], 'selection') \
and vals[field]:
upd1 += ',%s,now()'
upd2.append(user)
cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2))
+ d1, d2, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'create', context=context)
+ if d1:
+ d1 = ' AND '+' AND '.join(d1)
+ cr.execute('SELECT '+self._table+'.id FROM '+','.join(tables)+' ' \
+ 'WHERE '+self._table+'.id = ' +str(id_new)+d1,d2)
+ if not cr.rowcount:
+ raise except_orm(_('AccessError'),
+ _('You try to bypass an access rule to create (Document type: %s).') \
+ % self._name)
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
if self._parent_store:
continue
result.setdefault(fncts[fnct][0], {})
- ids2 = fncts[fnct][2](self,cr, uid, ids, context)
+
+ # uid == 1 for accessing objects having rules defined on store fields
+ ids2 = fncts[fnct][2](self,cr, 1, ids, context)
for id in filter(None, ids2):
result[fncts[fnct][0]].setdefault(id, [])
result[fncts[fnct][0]][id].append(fnct)
for key in keys:
val = todo[key]
if key:
- result = self._columns[val[0]].get(cr, self, ids, val, uid, context=context)
+ # uid == 1 for accessing objects having rules defined on store fields
+ result = self._columns[val[0]].get(cr, self, ids, val, 1, context=context)
for id,value in result.items():
if field_flag:
for f in value.keys():
upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
upd1.append(self._columns[v]._symbol_set[1](value[v]))
upd1.append(id)
- cr.execute('update "' + self._table + '" set ' + \
- string.join(upd0, ',') + ' where id = %s', upd1)
+ if upd0 and upd1:
+ cr.execute('update "' + self._table + '" set ' + \
+ string.join(upd0, ',') + ' where id = %s', upd1)
else:
for f in val:
- result = self._columns[f].get(cr, self, ids, f, uid, context=context)
+ # uid == 1 for accessing objects having rules defined on store fields
+ result = self._columns[f].get(cr, self, ids, f, 1, context=context)
for r in result.keys():
if field_flag:
if r in field_dict.keys():
context = {}
# compute the where, order by, limit and offset clauses
(qu1, qu2, tables) = self._where_calc(cr, user, args, context=context)
+ dom = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
+ qu1 = qu1 + dom[0]
+ qu2 = qu2 + dom[1]
+ for t in dom[2]:
+ if t not in tables:
+ tables.append(t)
if len(qu1):
qu1 = ' where '+string.join(qu1, ' and ')
else:
qu1 = ''
+
+ order_by = self._order
if order:
self._check_qorder(order)
- order_by = order or self._order
+ o = order.split(' ')[0]
+ if (o in self._columns) and getattr(self._columns[o], '_classic_write'):
+ order_by = order
limit_str = limit and ' limit %d' % limit or ''
offset_str = offset and ' offset %d' % offset or ''
- # construct a clause for the rules :
- d1, d2 = self.pool.get('ir.rule').domain_get(cr, user, self._name)
- if d1:
- qu1 = qu1 and qu1+' and '+d1 or ' where '+d1
- qu2 += d2
-
if count:
cr.execute('select count(%s.id) from ' % self._table +
','.join(tables) +qu1 + limit_str + offset_str, qu2)