Fields Attributes:
* _classic_read: is a classic sql fields
* _type : field type
+ * _auto_join: for one2many and many2one fields, tells whether select
+ queries will join the relational table instead of replacing the
+      field condition by an equivalent one based on a search.
* readonly
* required
* size
import base64
import datetime as DT
+import logging
+import pytz
import re
-import string
-import sys
-import warnings
import xmlrpclib
from psycopg2 import Binary
import openerp
-import openerp.netsvc as netsvc
import openerp.tools as tools
from openerp.tools.translate import _
-import json
+from openerp.tools import float_round, float_repr
+from openerp.tools import html_sanitize
+import simplejson
+from openerp import SUPERUSER_ID
+
+_logger = logging.getLogger(__name__)
def _symbol_set(symb):
- if symb == None or symb == False:
+ if symb is None or symb == False:
return None
elif isinstance(symb, unicode):
return symb.encode('utf-8')
"""
_classic_read = True
_classic_write = True
+ _auto_join = False
_prefetch = True
_properties = False
_type = 'unknown'
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = None
+ # used to hide a certain field type in the list of field types
+ _deprecated = False
+
def __init__(self, string='unknown', required=False, readonly=False, domain=None, context=None, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
"""
self.manual = manual
self.selectable = True
self.group_operator = args.get('group_operator', False)
+ self.groups = False # CSV list of ext IDs of groups that can access this field
+ self.deprecated = False # Optional deprecation warning
for a in args:
- if args[a]:
- setattr(self, a, args[a])
-
+ setattr(self, a, args[a])
+
def restart(self):
pass
res = obj.read(cr, uid, ids, [name], context=context)
return [x[name] for x in res]
+ def as_display_name(self, cr, uid, obj, value, context=None):
+ """Converts a field value to a suitable string representation for a record,
+ e.g. when this field is used as ``rec_name``.
+
+ :param obj: the ``BaseModel`` instance this column belongs to
+ :param value: a proper value as returned by :py:meth:`~openerp.orm.osv.BaseModel.read`
+ for this column
+ """
+ # delegated to class method, so a column type A can delegate
+ # to a column type B.
+ return self._as_display_name(self, cr, uid, obj, value, context=None)
+
+    @classmethod
+    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
+        # This needs to be a class method, in case a column type A has to delegate
+        # to a column type B.
+        return tools.ustr(value)
# ---------------------------------------------------------
# Simple fields
def __init__(self, string='unknown', required=False, **args):
super(boolean, self).__init__(string=string, required=required, **args)
if required:
- warnings.warn("Making a boolean field `required` has no effect, as NULL values are "
- "automatically turned into False", PendingDeprecationWarning, stacklevel=2)
+ _logger.debug(
+ "required=True is deprecated: making a boolean field"
+ " `required` has no effect, as NULL values are "
+ "automatically turned into False. args: %r",args)
class integer(_column):
_type = 'integer'
def __init__(self, string='unknown', required=False, **args):
super(integer, self).__init__(string=string, required=required, **args)
- if required:
- warnings.warn("Making an integer field `required` has no effect, as NULL values are "
- "automatically turned into 0", PendingDeprecationWarning, stacklevel=2)
-
-class integer_big(_column):
- """Experimental 64 bit integer column type, currently unused.
-
- TODO: this field should work fine for values up
- to 32 bits, but greater values will not fit
- in the XML-RPC int type, so a specific
- get() method is needed to pass them as floats,
- like what we do for integer functional fields.
- """
- _type = 'integer_big'
- # do not reference the _symbol_* of integer class, as that would possibly
- # unbind the lambda functions
- _symbol_c = '%s'
- _symbol_f = lambda x: int(x or 0)
- _symbol_set = (_symbol_c, _symbol_f)
- _symbol_get = lambda self,x: x or 0
-
- def __init__(self, string='unknown', required=False, **args):
- super(integer_big, self).__init__(string=string, required=required, **args)
- if required:
- warnings.warn("Making an integer_big field `required` has no effect, as NULL values are "
- "automatically turned into 0", PendingDeprecationWarning, stacklevel=2)
class reference(_column):
_type = 'reference'
result[value['id']] = False
return result
-class char(_column):
- _type = 'char'
+    @classmethod
+    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
+        if value:
+            # reference fields have a 'model,id'-like value, that we need to convert
+            # to a real name
+            model_name, res_id = value.split(',')
+            model = obj.pool.get(model_name)
+            if model and res_id:
+                return model.name_get(cr, uid, [int(res_id)], context=context)[0][1]
+        # fallback: empty value, unknown model or missing res_id — show the raw value
+        return tools.ustr(value)
+
+# takes a string (encoded in utf8) and returns a string (encoded in utf8)
+def _symbol_set_char(self, symb):
+
+ #TODO:
+ # * we need to remove the "symb==False" from the next line BUT
+ # for now too many things rely on this broken behavior
+ # * the symb==None test should be common to all data types
+ if symb is None or symb == False:
+ return None
- def __init__(self, string, size, **args):
- _column.__init__(self, string=string, size=size, **args)
- self._symbol_set = (self._symbol_c, self._symbol_set_char)
-
- # takes a string (encoded in utf8) and returns a string (encoded in utf8)
- def _symbol_set_char(self, symb):
- #TODO:
- # * we need to remove the "symb==False" from the next line BUT
- # for now too many things rely on this broken behavior
- # * the symb==None test should be common to all data types
- if symb == None or symb == False:
- return None
+ # we need to convert the string to a unicode object to be able
+ # to evaluate its length (and possibly truncate it) reliably
+ u_symb = tools.ustr(symb)
+ return u_symb[:self.size].encode('utf8')
- # we need to convert the string to a unicode object to be able
- # to evaluate its length (and possibly truncate it) reliably
- u_symb = tools.ustr(symb)
+class char(_column):
+ _type = 'char'
- return u_symb[:self.size].encode('utf8')
+ def __init__(self, string="unknown", size=None, **args):
+ _column.__init__(self, string=string, size=size or None, **args)
+ # self._symbol_set_char defined to keep the backward compatibility
+ self._symbol_f = self._symbol_set_char = lambda x: _symbol_set_char(self, x)
+ self._symbol_set = (self._symbol_c, self._symbol_f)
class text(_column):
_type = 'text'
+class html(text):
+ _type = 'html'
+ _symbol_c = '%s'
+ def _symbol_f(x):
+ if x is None or x == False:
+ return None
+ return html_sanitize(x)
+
+ _symbol_set = (_symbol_c, _symbol_f)
+
import __builtin__
class float(_column):
def __init__(self, string='unknown', digits=None, digits_compute=None, required=False, **args):
_column.__init__(self, string=string, required=required, **args)
self.digits = digits
+ # synopsis: digits_compute(cr) -> (precision, scale)
self.digits_compute = digits_compute
- if required:
- warnings.warn("Making a float field `required` has no effect, as NULL values are "
- "automatically turned into 0.0", PendingDeprecationWarning, stacklevel=2)
-
def digits_change(self, cr):
if self.digits_compute:
- t = self.digits_compute(cr)
- self._symbol_set=('%s', lambda x: ('%.'+str(t[1])+'f') % (__builtin__.float(x or 0.0),))
- self.digits = t
+ self.digits = self.digits_compute(cr)
+ if self.digits:
+ precision, scale = self.digits
+ self._symbol_set = ('%s', lambda x: float_repr(float_round(__builtin__.float(x or 0.0),
+ precision_digits=scale),
+ precision_digits=scale))
class date(_column):
_type = 'date'
+
@staticmethod
def today(*args):
""" Returns the current date in a format fit for being a
return DT.date.today().strftime(
tools.DEFAULT_SERVER_DATE_FORMAT)
+ @staticmethod
+ def context_today(model, cr, uid, context=None, timestamp=None):
+ """Returns the current date as seen in the client's timezone
+ in a format fit for date fields.
+ This method may be passed as value to initialize _defaults.
+
+ :param Model model: model (osv) for which the date value is being
+ computed - automatically passed when used in
+ _defaults.
+ :param datetime timestamp: optional datetime value to use instead of
+ the current date and time (must be a
+ datetime, regular dates can't be converted
+ between timezones.)
+ :param dict context: the 'tz' key in the context should give the
+ name of the User/Client timezone (otherwise
+ UTC is used)
+ :rtype: str
+ """
+ today = timestamp or DT.datetime.now()
+ context_today = None
+ if context and context.get('tz'):
+ tz_name = context['tz']
+ else:
+ tz_name = model.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
+ if tz_name:
+ try:
+ utc = pytz.timezone('UTC')
+ context_tz = pytz.timezone(tz_name)
+ utc_today = utc.localize(today, is_dst=False) # UTC = no DST
+ context_today = utc_today.astimezone(context_tz)
+ except Exception:
+ _logger.debug("failed to compute context/client-specific today date, "
+ "using the UTC value for `today`",
+ exc_info=True)
+ return (context_today or today).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
+
class datetime(_column):
_type = 'datetime'
@staticmethod
return DT.datetime.now().strftime(
tools.DEFAULT_SERVER_DATETIME_FORMAT)
-class time(_column):
- _type = 'time'
@staticmethod
- def now( *args):
- """ Returns the current time in a format fit for being a
- default value to a ``time`` field.
-
- This method should be proivided as is to the _defaults dict,
- it should not be called.
+ def context_timestamp(cr, uid, timestamp, context=None):
+ """Returns the given timestamp converted to the client's timezone.
+ This method is *not* meant for use as a _defaults initializer,
+ because datetime fields are automatically converted upon
+        display on client side. For _defaults, :meth:`fields.datetime.now`
+ should be used instead.
+
+ :param datetime timestamp: naive datetime value (expressed in UTC)
+ to be converted to the client timezone
+ :param dict context: the 'tz' key in the context should give the
+ name of the User/Client timezone (otherwise
+ UTC is used)
+ :rtype: datetime
+ :return: timestamp converted to timezone-aware datetime in context
+ timezone
"""
- return DT.datetime.now().strftime(
- tools.DEFAULT_SERVER_TIME_FORMAT)
+ assert isinstance(timestamp, DT.datetime), 'Datetime instance expected'
+ if context and context.get('tz'):
+ tz_name = context['tz']
+ else:
+ registry = openerp.modules.registry.RegistryManager.get(cr.dbname)
+ tz_name = registry.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
+ if tz_name:
+ try:
+ utc = pytz.timezone('UTC')
+ context_tz = pytz.timezone(tz_name)
+ utc_timestamp = utc.localize(timestamp, is_dst=False) # UTC = no DST
+ return utc_timestamp.astimezone(context_tz)
+ except Exception:
+ _logger.debug("failed to compute context/client-specific timestamp, "
+ "using the UTC value",
+ exc_info=True)
+ return timestamp
class binary(_column):
_type = 'binary'
_symbol_c = '%s'
- _symbol_f = lambda symb: symb and Binary(symb) or None
+
+ # Binary values may be byte strings (python 2.6 byte array), but
+ # the legacy OpenERP convention is to transfer and store binaries
+ # as base64-encoded strings. The base64 string may be provided as a
+ # unicode in some circumstances, hence the str() cast in symbol_f.
+ # This str coercion will only work for pure ASCII unicode strings,
+    # on purpose - non-base64 data must be passed as 8-bit byte strings.
+ _symbol_f = lambda symb: symb and Binary(str(symb)) or None
+
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self, x: x and str(x)
# (4, ID) link
# (5) unlink all (only valid for one2many)
#
-#CHECKME: dans la pratique c'est quoi la syntaxe utilisee pour le 5? (5) ou (5, 0)?
-class one2one(_column):
- _classic_read = False
- _classic_write = True
- _type = 'one2one'
-
- def __init__(self, obj, string='unknown', **args):
- warnings.warn("The one2one field doesn't work anymore", DeprecationWarning)
- _column.__init__(self, string=string, **args)
- self._obj = obj
-
- def set(self, cr, obj_src, id, field, act, user=None, context=None):
- if not context:
- context = {}
- obj = obj_src.pool.get(self._obj)
- self._table = obj_src.pool.get(self._obj)._table
- if act[0] == 0:
- id_new = obj.create(cr, user, act[1])
- cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (id_new, id))
- else:
- cr.execute('select '+field+' from '+obj_src._table+' where id=%s', (act[0],))
- id = cr.fetchone()[0]
- obj.write(cr, user, [id], act[1], context=context)
-
- def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
- return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)
-
class many2one(_column):
_classic_read = False
_symbol_f = lambda x: x or None
_symbol_set = (_symbol_c, _symbol_f)
- def __init__(self, obj, string='unknown', **args):
+ def __init__(self, obj, string='unknown', auto_join=False, **args):
_column.__init__(self, string=string, **args)
self._obj = obj
+ self._auto_join = auto_join
def get(self, cr, obj, ids, name, user=None, context=None, values=None):
if context is None:
# build a dictionary of the form {'id_of_distant_resource': name_of_distant_resource}
# we use uid=1 because the visibility of a many2one field value (just id and name)
# must be the access right of the parent form and not the linked object itself.
- records = dict(obj.name_get(cr, 1,
+ records = dict(obj.name_get(cr, SUPERUSER_ID,
list(set([x for x in res.values() if isinstance(x, (int,long))])),
context=context))
for id in res:
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)
+
+    @classmethod
+    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
+        # many2one values are read as (id, name) tuples; show the name part,
+        # and fall back to a plain unicode conversion for any raw value.
+        return value[1] if isinstance(value, tuple) else tools.ustr(value)
+
class one2many(_column):
_classic_read = False
_prefetch = False
_type = 'one2many'
- def __init__(self, obj, fields_id, string='unknown', limit=None, **args):
+ def __init__(self, obj, fields_id, string='unknown', limit=None, auto_join=False, **args):
_column.__init__(self, string=string, **args)
self._obj = obj
self._fields_id = fields_id
self._limit = limit
+ self._auto_join = auto_join
#one2many can't be used as condition for defaults
assert(self.change_default != True)
for id in ids:
res[id] = []
- ids2 = obj.pool.get(self._obj).search(cr, user, self._domain + [(self._fields_id, 'in', ids)], limit=self._limit, context=context)
+ domain = self._domain(obj) if callable(self._domain) else self._domain
+ ids2 = obj.pool.get(self._obj).search(cr, user, domain + [(self._fields_id, 'in', ids)], limit=self._limit, context=context)
for r in obj.pool.get(self._obj)._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
if r[self._fields_id] in res:
res[r[self._fields_id]].append(r['id'])
elif act[0] == 5:
reverse_rel = obj._all_columns.get(self._fields_id)
assert reverse_rel, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
- # if the model has on delete cascade, just delete the rows
+ # if the o2m has a static domain we must respect it when unlinking
+ domain = self._domain(obj) if callable(self._domain) else self._domain
+ extra_domain = domain or []
+ ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
+ # If the model has cascade deletion, we delete the rows because it is the intended behavior,
+ # otherwise we only nullify the reverse foreign key column.
if reverse_rel.column.ondelete == "cascade":
- obj.unlink(cr, user, obj.search(cr, user, [(self._fields_id,'=',id)], context=context), context=context)
+ obj.unlink(cr, user, ids_to_unlink, context=context)
else:
- cr.execute('update '+_table+' set '+self._fields_id+'=null where '+self._fields_id+'=%s', (id,))
+ obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
elif act[0] == 6:
# Must use write() to recompute parent_store structure if needed
obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
return result
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
- return obj.pool.get(self._obj).name_search(cr, uid, value, self._domain, operator, context=context,limit=limit)
+ domain = self._domain(obj) if callable(self._domain) else self._domain
+ return obj.pool.get(self._obj).name_search(cr, uid, value, domain, operator, context=context,limit=limit)
+
+ @classmethod
+ def _as_display_name(cls, field, cr, uid, obj, value, context=None):
+ raise NotImplementedError('One2Many columns should not be used as record name (_rec_name)')
#
# Values: (0, 0, { fields }) create
col1 = '%s_id' % source_model._table
if not col2:
col2 = '%s_id' % dest_model._table
- return (tbl, col1, col2)
+ return tbl, col1, col2
+
+ def _get_query_and_where_params(self, cr, model, ids, values, where_params):
+ """ Extracted from ``get`` to facilitate fine-tuning of the generated
+ query. """
+ query = 'SELECT %(rel)s.%(id2)s, %(rel)s.%(id1)s \
+ FROM %(rel)s, %(from_c)s \
+ WHERE %(rel)s.%(id1)s IN %%s \
+ AND %(rel)s.%(id2)s = %(tbl)s.id \
+ %(where_c)s \
+ %(order_by)s \
+ %(limit)s \
+ OFFSET %(offset)d' \
+ % values
+ return query, where_params
def get(self, cr, model, ids, name, user=None, offset=0, context=None, values=None):
if not context:
for id in ids:
res[id] = []
if offset:
- warnings.warn("Specifying offset at a many2many.get() may produce unpredictable results.",
- DeprecationWarning, stacklevel=2)
+ _logger.warning(
+ "Specifying offset at a many2many.get() is deprecated and may"
+ " produce unpredictable results.")
obj = model.pool.get(self._obj)
rel, id1, id2 = self._sql_names(model)
if where_c:
where_c = ' AND ' + where_c
- if offset or self._limit:
- order_by = ' ORDER BY "%s".%s' %(obj._table, obj._order.split(',')[0])
- else:
- order_by = ''
+ order_by = ' ORDER BY "%s".%s' %(obj._table, obj._order.split(',')[0])
limit_str = ''
if self._limit is not None:
limit_str = ' LIMIT %d' % self._limit
- query = 'SELECT %(rel)s.%(id2)s, %(rel)s.%(id1)s \
- FROM %(rel)s, %(from_c)s \
- WHERE %(rel)s.%(id1)s IN %%s \
- AND %(rel)s.%(id2)s = %(tbl)s.id \
- %(where_c)s \
- %(order_by)s \
- %(limit)s \
- OFFSET %(offset)d' \
- % {'rel': rel,
+ query, where_params = self._get_query_and_where_params(cr, model, ids, {'rel': rel,
'from_c': from_c,
'tbl': obj._table,
'id1': id1,
'limit': limit_str,
'order_by': order_by,
'offset': offset,
- }
+ }, where_params)
+
cr.execute(query, [tuple(ids),] + where_params)
for r in cr.fetchall():
res[r[1]].append(r[0])
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name', operator, value)], offset, limit, context=context)
+ @classmethod
+ def _as_display_name(cls, field, cr, uid, obj, value, context=None):
+ raise NotImplementedError('Many2Many columns should not be used as record name (_rec_name)')
+
def get_nice_size(value):
size = 0
self._classic_write = True
if type=='binary':
self._symbol_get=lambda x:x and str(x)
+ else:
+ self._prefetch = True
if type == 'float':
self._symbol_c = float._symbol_c
self._symbol_f = boolean._symbol_f
self._symbol_set = boolean._symbol_set
- if type in ['integer','integer_big']:
+ if type == 'integer':
self._symbol_c = integer._symbol_c
self._symbol_f = integer._symbol_f
self._symbol_set = integer._symbol_set
- def digits_change(self, cr):
- if self.digits_compute:
- t = self.digits_compute(cr)
- self._symbol_set=('%s', lambda x: ('%.'+str(t[1])+'f') % (__builtin__.float(x or 0.0),))
- self.digits = t
+ if type == 'char':
+ self._symbol_c = char._symbol_c
+ self._symbol_f = lambda x: _symbol_set_char(self, x)
+ self._symbol_set = (self._symbol_c, self._symbol_f)
+ def digits_change(self, cr):
+ if self._type == 'float':
+ if self.digits_compute:
+ self.digits = self.digits_compute(cr)
+ if self.digits:
+ precision, scale = self.digits
+ self._symbol_set = ('%s', lambda x: float_repr(float_round(__builtin__.float(x or 0.0),
+ precision_digits=scale),
+ precision_digits=scale))
def search(self, cr, uid, obj, name, args, context=None):
if not self._fnct_search:
result = (value, dict_names[value])
if field_type == 'binary':
- if context.get('bin_size', False):
+ if context.get('bin_size'):
# client requests only the size of binary fields
result = get_nice_size(value)
- else:
+ elif not context.get('bin_raw'):
result = sanitize_binary_value(value)
- if field_type in ("integer","integer_big") and value > xmlrpclib.MAXINT:
+ if field_type == "integer" and value > xmlrpclib.MAXINT:
# integer/long values greater than 2^31-1 are not supported
# in pure XMLRPC, so we have to pass them as floats :-(
# This is not needed for stored fields and non-functional integer
# fields, as their values are constrained by the database backend
# to the same 32bits signed int limit.
- result = float(value)
+ result = __builtin__.float(value)
return result
def get(self, cr, obj, ids, name, uid=False, context=None, values=None):
if self._fnct_inv:
self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)
+ @classmethod
+ def _as_display_name(cls, field, cr, uid, obj, value, context=None):
+ # Function fields are supposed to emulate a basic field type,
+ # so they can delegate to the basic type for record name rendering
+ return globals()[field._type]._as_display_name(field, cr, uid, obj, value, context=context)
+
# ---------------------------------------------------------
# Related fields
# ---------------------------------------------------------
"""
def _fnct_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
- self._field_get2(cr, uid, obj, context)
- i = len(self._arg)-1
- sarg = name
- while i>0:
- if type(sarg) in [type([]), type( (1,) )]:
- where = [(self._arg[i], 'in', sarg)]
- else:
- where = [(self._arg[i], '=', sarg)]
- if domain:
- where = map(lambda x: (self._arg[i],x[1], x[2]), domain)
- domain = []
- sarg = obj.pool.get(self._relations[i]['object']).search(cr, uid, where, context=context)
- i -= 1
- return [(self._arg[0], 'in', sarg)]
+ # assume self._arg = ('foo', 'bar', 'baz')
+ # domain = [(name, op, val)] => search [('foo.bar.baz', op, val)]
+ field = '.'.join(self._arg)
+ return map(lambda x: (field, x[1], x[2]), domain)
def _fnct_write(self,obj,cr, uid, ids, field_name, values, args, context=None):
- self._field_get2(cr, uid, obj, context=context)
- if type(ids) != type([]):
- ids=[ids]
- objlst = obj.browse(cr, uid, ids)
- for data in objlst:
- t_id = data.id
- t_data = data
- for i in range(len(self.arg)):
- if not t_data: break
- field_detail = self._relations[i]
- if not t_data[self.arg[i]]:
- if self._type not in ('one2many', 'many2many'):
- t_id = t_data['id']
- t_data = False
- elif field_detail['type'] in ('one2many', 'many2many'):
- if self._type != "many2one":
- t_id = t_data.id
- t_data = t_data[self.arg[i]][0]
- else:
- t_data = False
- else:
- t_id = t_data['id']
- t_data = t_data[self.arg[i]]
- else:
- model = obj.pool.get(self._relations[-1]['object'])
- model.write(cr, uid, [t_id], {args[-1]: values}, context=context)
+ if isinstance(ids, (int, long)):
+ ids = [ids]
+ for record in obj.browse(cr, uid, ids, context=context):
+ # traverse all fields except the last one
+ for field in self.arg[:-1]:
+ record = record[field] or False
+ if not record:
+ break
+ elif isinstance(record, list):
+ # record is the result of a one2many or many2many field
+ record = record[0]
+ if record:
+ # write on the last field
+ record.write({self.arg[-1]: values})
def _fnct_read(self, obj, cr, uid, ids, field_name, args, context=None):
- self._field_get2(cr, uid, obj, context)
- if not ids: return {}
- relation = obj._name
- if self._type in ('one2many', 'many2many'):
- res = dict([(i, []) for i in ids])
- else:
- res = {}.fromkeys(ids, False)
-
- objlst = obj.browse(cr, 1, ids, context=context)
- for data in objlst:
- if not data:
- continue
- t_data = data
- relation = obj._name
- for i in range(len(self.arg)):
- field_detail = self._relations[i]
- relation = field_detail['object']
- try:
- if not t_data[self.arg[i]]:
- t_data = False
- break
- except:
- t_data = False
+ res = {}
+ for record in obj.browse(cr, SUPERUSER_ID, ids, context=context):
+ value = record
+ for field in self.arg:
+ if isinstance(value, list):
+ value = value[0]
+ value = value[field] or False
+ if not value:
break
- if field_detail['type'] in ('one2many', 'many2many') and i != len(self.arg) - 1:
- t_data = t_data[self.arg[i]][0]
- elif t_data:
- t_data = t_data[self.arg[i]]
- if type(t_data) == type(objlst[0]):
- res[data.id] = t_data.id
- elif t_data:
- res[data.id] = t_data
- if self._type=='many2one':
- ids = filter(None, res.values())
- if ids:
- # name_get as root, as seeing the name of a related
- # object depends on access right of source document,
- # not target, so user may not have access.
- ng = dict(obj.pool.get(self._obj).name_get(cr, 1, ids, context=context))
- for r in res:
- if res[r]:
- res[r] = (res[r], ng[res[r]])
+ res[record.id] = value
+
+ if self._type == 'many2one':
+ # res[id] is a browse_record or False; convert it to (id, name) or False.
+ # Perform name_get as root, as seeing the name of a related object depends on
+ # access right of source document, not target, so user may not have access.
+ value_ids = list(set(value.id for value in res.itervalues() if value))
+ value_name = dict(obj.pool.get(self._obj).name_get(cr, SUPERUSER_ID, value_ids, context=context))
+ res = dict((id, value and (value.id, value_name[value.id])) for id, value in res.iteritems())
+
elif self._type in ('one2many', 'many2many'):
- for r in res:
- if res[r]:
- res[r] = [x.id for x in res[r]]
+ # res[id] is a list of browse_record or False; convert it to a list of ids
+ res = dict((id, value and map(int, value) or []) for id, value in res.iteritems())
+
return res
def __init__(self, *arg, **args):
# TODO: improve here to change self.store = {...} according to related objects
pass
- def _field_get2(self, cr, uid, obj, context=None):
- if self._relations:
- return
- obj_name = obj._name
- for i in range(len(self._arg)):
- f = obj.pool.get(obj_name).fields_get(cr, uid, [self._arg[i]], context=context)[self._arg[i]]
- self._relations.append({
- 'object': obj_name,
- 'type': f['type']
-
- })
- if f.get('relation',False):
- obj_name = f['relation']
- self._relations[-1]['relation'] = f['relation']
-
class sparse(function):
serialized = getattr(record, self.serialization_field)
results[record.id] = {}
for field_name in field_names:
- if obj._columns[field_name]._type in ['one2many']:
- value = serialized.get(field_name, [])
- else:
- results[record.id].update(field_name=value)
+ field_type = obj._columns[field_name]._type
+ value = serialized.get(field_name, False)
+ if field_type in ('one2many','many2many'):
+ value = value or []
+ if value:
+ # filter out deleted records as superuser
+ relation_obj = obj.pool.get(obj._columns[field_name].relation)
+ value = relation_obj.exists(cr, openerp.SUPERUSER_ID, value)
+ if type(value) in (int,long) and field_type == 'many2one':
+ relation_obj = obj.pool.get(obj._columns[field_name].relation)
+ # check for deleted record as superuser
+ if not relation_obj.exists(cr, openerp.SUPERUSER_ID, [value]):
+ value = False
+ results[record.id][field_name] = value
return results
def __init__(self, serialization_field, **kwargs):
self.serialization_field = serialization_field
- return super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='__sparse_multi', method=True, **kwargs)
+ super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='__sparse_multi', **kwargs)
-
-
# ---------------------------------------------------------
# Dummy fields
# ---------------------------------------------------------
"""
def _symbol_set_struct(val):
- return json.dumps(val)
+ return simplejson.dumps(val)
def _symbol_get_struct(self, val):
- return json.loads(val or '{}')
+ return simplejson.loads(val or '{}')
_prefetch = False
_type = 'serialized'
# not target, so user may not have access) in order to avoid
# pointing on an unexisting record.
if property_destination_obj:
- if res[id][prop_name] and obj.pool.get(property_destination_obj).exists(cr, 1, res[id][prop_name].id):
+ if res[id][prop_name] and obj.pool.get(property_destination_obj).exists(cr, SUPERUSER_ID, res[id][prop_name].id):
name_get_ids[id] = res[id][prop_name].id
else:
res[id][prop_name] = False
# name_get as root (as seeing the name of a related
# object depends on access right of source document,
# not target, so user may not have access.)
- name_get_values = dict(obj.pool.get(property_destination_obj).name_get(cr, 1, name_get_ids.values(), context=context))
+ name_get_values = dict(obj.pool.get(property_destination_obj).name_get(cr, SUPERUSER_ID, name_get_ids.values(), context=context))
# the property field is a m2o, we need to return a tuple with (id, name)
for k, v in name_get_ids.iteritems():
if res[k][prop_name]:
"""
res = {'type': field._type}
- # This additional attributes for M2M and function field is added
- # because we need to display tooltip with this additional information
- # when client is started in debug mode.
+ # some attributes for m2m/function field are added as debug info only
if isinstance(field, function):
res['function'] = field._fnct and field._fnct.func_name or False
res['store'] = field.store
res['fnct_search'] = field._fnct_search and field._fnct_search.func_name or False
res['fnct_inv'] = field._fnct_inv and field._fnct_inv.func_name or False
res['fnct_inv_arg'] = field._fnct_inv_arg or False
- res['func_obj'] = field._obj or False
if isinstance(field, many2many):
(table, col1, col2) = field._sql_names(model)
- res['related_columns'] = [col1, col2]
- res['third_table'] = table
- for arg in ('string', 'readonly', 'states', 'size', 'required', 'group_operator',
- 'change_default', 'translate', 'help', 'select', 'selectable'):
- if getattr(field, arg):
- res[arg] = getattr(field, arg)
- for arg in ('digits', 'invisible', 'filters'):
+ res['m2m_join_columns'] = [col1, col2]
+ res['m2m_join_table'] = table
+ for arg in ('string', 'readonly', 'states', 'size', 'group_operator', 'required',
+ 'change_default', 'translate', 'help', 'select', 'selectable', 'groups',
+ 'deprecated', 'digits', 'invisible', 'filters'):
if getattr(field, arg, None):
res[arg] = getattr(field, arg)
- if field.string:
- res['string'] = field.string
- if field.help:
- res['help'] = field.help
-
if hasattr(field, 'selection'):
if isinstance(field.selection, (tuple, list)):
res['selection'] = field.selection
else:
# call the 'dynamic selection' function
res['selection'] = field.selection(model, cr, user, context)
- if res['type'] in ('one2many', 'many2many', 'many2one', 'one2one'):
+ if res['type'] in ('one2many', 'many2many', 'many2one'):
res['relation'] = field._obj
- res['domain'] = field._domain
+ res['domain'] = field._domain(model) if callable(field._domain) else field._domain
res['context'] = field._context
if isinstance(field, one2many):
class column_info(object):
- """Struct containing details about an osv column, either one local to
- its model, or one inherited via _inherits.
-
- :attr name: name of the column
- :attr column: column instance, subclass of osv.fields._column
- :attr parent_model: if the column is inherited, name of the model
- that contains it, None for local columns.
- :attr parent_column: the name of the column containing the m2o
- relationship to the parent model that contains
- this column, None for local columns.
- :attr original_parent: if the column is inherited, name of the original
- parent model that contains it i.e in case of multilevel
- inheritence, None for local columns.
+ """ Struct containing details about an osv column, either one local to
+ its model, or one inherited via _inherits.
+
+ .. attribute:: name
+
+ name of the column
+
+ .. attribute:: column
+
+ column instance, subclass of :class:`_column`
+
+ .. attribute:: parent_model
+
+ if the column is inherited, name of the model that contains it,
+ ``None`` for local columns.
+
+ .. attribute:: parent_column
+
+ the name of the column containing the m2o relationship to the
+ parent model that contains this column, ``None`` for local columns.
+
+ .. attribute:: original_parent
+
+ if the column is inherited, name of the original parent model that
+        contains it, i.e. in case of multilevel inheritance, ``None`` for
+ local columns.
"""
def __init__(self, name, column, parent_model=None, parent_column=None, original_parent=None):
self.name = name
self.parent_column = parent_column
self.original_parent = original_parent
+    def __str__(self):
+        # debug-friendly one-line rendering of all struct attributes
+        return '%s(%s, %s, %s, %s, %s)' % (
+            self.__class__.__name__, self.name, self.column,
+            self.parent_model, self.parent_column, self.original_parent)
+
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: