[FIX] base/res_partner: small fix for backward compatibility
diff --git a/openerp/osv/fields.py b/openerp/osv/fields.py
index 04364a6..922fd5f 100644
--- a/openerp/osv/fields.py
+++ b/openerp/osv/fields.py
 
 import base64
 import datetime as DT
+import logging
+import pytz
 import re
-import string
-import sys
-import warnings
 import xmlrpclib
 from psycopg2 import Binary
 
 import openerp
-import openerp.netsvc as netsvc
 import openerp.tools as tools
 from openerp.tools.translate import _
-import json
+from openerp.tools import float_round, float_repr
+import simplejson
+
+_logger = logging.getLogger(__name__)
 
 def _symbol_set(symb):
     if symb == None or symb == False:
@@ -74,6 +75,9 @@ class _column(object):
     _symbol_set = (_symbol_c, _symbol_f)
     _symbol_get = None
 
+    # used to hide a certain field type in the list of field types
+    _deprecated = False
+
     def __init__(self, string='unknown', required=False, readonly=False, domain=None, context=None, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
         """
 
@@ -135,8 +139,10 @@ class boolean(_column):
     def __init__(self, string='unknown', required=False, **args):
         super(boolean, self).__init__(string=string, required=required, **args)
         if required:
-            warnings.warn("Making a boolean field `required` has no effect, as NULL values are "
-                          "automatically turned into False", PendingDeprecationWarning, stacklevel=2)
+            _logger.debug(
+                "required=True is deprecated: making a boolean field"
+                " `required` has no effect, as NULL values are "
+                "automatically turned into False.")
 
 class integer(_column):
     _type = 'integer'
@@ -148,8 +154,10 @@ class integer(_column):
     def __init__(self, string='unknown', required=False, **args):
         super(integer, self).__init__(string=string, required=required, **args)
         if required:
-            warnings.warn("Making an integer field `required` has no effect, as NULL values are "
-                          "automatically turned into 0", PendingDeprecationWarning, stacklevel=2)
+            _logger.debug(
+                "required=True is deprecated: making an integer field"
+                " `required` has no effect, as NULL values are "
+                "automatically turned into 0.")
 
 class integer_big(_column):
     """Experimental 64 bit integer column type, currently unused.
@@ -167,12 +175,15 @@ class integer_big(_column):
     _symbol_f = lambda x: int(x or 0)
     _symbol_set = (_symbol_c, _symbol_f)
     _symbol_get = lambda self,x: x or 0
+    _deprecated = True
 
     def __init__(self, string='unknown', required=False, **args):
         super(integer_big, self).__init__(string=string, required=required, **args)
         if required:
-            warnings.warn("Making an integer_big field `required` has no effect, as NULL values are "
-                          "automatically turned into 0", PendingDeprecationWarning, stacklevel=2)
+            _logger.debug(
+                "required=True is deprecated: making an integer_big field"
+                " `required` has no effect, as NULL values are "
+                "automatically turned into 0.")
 
 class reference(_column):
     _type = 'reference'
@@ -230,20 +241,26 @@ class float(_column):
     def __init__(self, string='unknown', digits=None, digits_compute=None, required=False, **args):
         _column.__init__(self, string=string, required=required, **args)
         self.digits = digits
+        # synopsis: digits_compute(cr) -> (precision, scale)
         self.digits_compute = digits_compute
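+        # e.g. (illustrative) addons typically pass
+        #   digits_compute=openerp.addons.decimal_precision.get_precision('Account')
+        # which returns a callable that looks up the configured precision at runtime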
         if required:
-            warnings.warn("Making a float field `required` has no effect, as NULL values are "
-                          "automatically turned into 0.0", PendingDeprecationWarning, stacklevel=2)
-
+            _logger.debug(
+                "required=True is deprecated: making a float field"
+                " `required` has no effect, as NULL values are "
+                "automatically turned into 0.0.")
 
     def digits_change(self, cr):
         if self.digits_compute:
-            t = self.digits_compute(cr)
-            self._symbol_set=('%s', lambda x: ('%.'+str(t[1])+'f') % (__builtin__.float(x or 0.0),))
-            self.digits = t
+            self.digits = self.digits_compute(cr)
+        if self.digits:
+            precision, scale = self.digits
+            self._symbol_set = ('%s', lambda x: float_repr(float_round(__builtin__.float(x or 0.0),
+                                                                       precision_digits=scale),
+                                                           precision_digits=scale))
 
 class date(_column):
     _type = 'date'
+
     @staticmethod
     def today(*args):
         """ Returns the current date in a format fit for being a
@@ -255,6 +272,38 @@ class date(_column):
         return DT.date.today().strftime(
             tools.DEFAULT_SERVER_DATE_FORMAT)
 
+    @staticmethod
+    def context_today(model, cr, uid, context=None, timestamp=None):
+        """Returns the current date as seen in the client's timezone
+           in a format fit for date fields.
+           This method may be used directly as a value in _defaults (see the example below).
+
+           :param Model model: model (osv) for which the date value is being
+                               computed - technical field, currently ignored,
+                               automatically passed when used in _defaults.
+           :param datetime timestamp: optional datetime value to use instead of
+                                      the current date and time (must be a
+                                      datetime; regular dates can't be converted
+                                      between timezones)
+           :param dict context: the 'tz' key in the context should give the
+                                name of the User/Client timezone (otherwise
+                                UTC is used)
+           :rtype: str
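+
+           Illustrative example (the field name is hypothetical)::
+
+               _defaults = {
+                   'date_invoice': fields.date.context_today,
+               }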
+        """
+        today = timestamp or DT.datetime.now()
+        context_today = None
+        if context and context.get('tz'):
+            try:
+                utc = pytz.timezone('UTC')
+                context_tz = pytz.timezone(context['tz'])
+                utc_today = utc.localize(today, is_dst=False) # UTC = no DST
+                context_today = utc_today.astimezone(context_tz)
+            except Exception:
+                _logger.debug("failed to compute context/client-specific today date, "
+                              "using the UTC value for `today`",
+                              exc_info=True)
+        return (context_today or today).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
+
 class datetime(_column):
     _type = 'datetime'
     @staticmethod
@@ -268,8 +317,39 @@ class datetime(_column):
         return DT.datetime.now().strftime(
             tools.DEFAULT_SERVER_DATETIME_FORMAT)
 
+    @staticmethod
+    def context_timestamp(cr, uid, timestamp, context=None):
+        """Returns the given timestamp converted to the client's timezone.
+           This method is *not* meant for use as a _defaults initializer,
+           because datetime fields are automatically converted upon
+           display on the client side. For _defaults,
+           :meth:`fields.datetime.now` should be used instead.
+
+           :param datetime timestamp: naive datetime value (expressed in UTC)
+                                      to be converted to the client timezone
+           :param dict context: the 'tz' key in the context should give the
+                                name of the User/Client timezone (otherwise
+                                UTC is used)
+           :rtype: datetime
+           :return: timestamp converted to timezone-aware datetime in context
+                    timezone
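+
+           Illustrative example (the timezone name is arbitrary; assumes a
+           naive UTC datetime)::
+
+               local_dt = fields.datetime.context_timestamp(
+                   cr, uid, DT.datetime.utcnow(),
+                   context={'tz': 'Europe/Brussels'})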
+        """
+        assert isinstance(timestamp, DT.datetime), 'Datetime instance expected'
+        if context and context.get('tz'):
+            try:
+                utc = pytz.timezone('UTC')
+                context_tz = pytz.timezone(context['tz'])
+                utc_timestamp = utc.localize(timestamp, is_dst=False) # UTC = no DST
+                return utc_timestamp.astimezone(context_tz)
+            except Exception:
+                _logger.debug("failed to compute context/client-specific timestamp, "
+                              "using the UTC value",
+                              exc_info=True)
+        return timestamp
+
 class time(_column):
     _type = 'time'
+    _deprecated = True
     @staticmethod
     def now( *args):
         """ Returns the current time in a format fit for being a
@@ -343,9 +423,10 @@ class one2one(_column):
     _classic_read = False
     _classic_write = True
     _type = 'one2one'
+    _deprecated = True
 
     def __init__(self, obj, string='unknown', **args):
-        warnings.warn("The one2one field doesn't work anymore", DeprecationWarning)
+        _logger.warning("The one2one field is deprecated and doesn't work anymore.")
         _column.__init__(self, string=string, **args)
         self._obj = obj
 
@@ -500,11 +581,15 @@ class one2many(_column):
             elif act[0] == 5:
                 reverse_rel = obj._all_columns.get(self._fields_id)
                 assert reverse_rel, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
-                # if the model has on delete cascade, just delete the rows
+                # if the o2m has a static domain we must respect it when unlinking
+                extra_domain = self._domain if isinstance(getattr(self, '_domain', None), list) else []
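+                # e.g. (hypothetical) an o2m declared with domain=[('state','=','draft')]
+                # must only detach or delete its draft lines here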
+                ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
+                # If the model has cascade deletion, we delete the rows because it is the intended behavior,
+                # otherwise we only nullify the reverse foreign key column.
                 if reverse_rel.column.ondelete == "cascade":
-                    obj.unlink(cr, user, obj.search(cr, user, [(self._fields_id,'=',id)], context=context), context=context)
+                    obj.unlink(cr, user, ids_to_unlink, context=context)
                 else:
-                    cr.execute('update '+_table+' set '+self._fields_id+'=null where '+self._fields_id+'=%s', (id,))
+                    obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
             elif act[0] == 6:
                 # Must use write() to recompute parent_store structure if needed
                 obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
@@ -606,8 +691,9 @@ class many2many(_column):
         for id in ids:
             res[id] = []
         if offset:
-            warnings.warn("Specifying offset at a many2many.get() may produce unpredictable results.",
-                      DeprecationWarning, stacklevel=2)
+            _logger.warning(
+                "Specifying offset at a many2many.get() is deprecated and may"
+                " produce unpredictable results.")
         obj = model.pool.get(self._obj)
         rel, id1, id2 = self._sql_names(model)
 
@@ -991,11 +1077,14 @@ class function(_column):
             self._symbol_set = integer._symbol_set
 
     def digits_change(self, cr):
-        if self.digits_compute:
-            t = self.digits_compute(cr)
-            self._symbol_set=('%s', lambda x: ('%.'+str(t[1])+'f') % (__builtin__.float(x or 0.0),))
-            self.digits = t
-
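+        # only function fields emulating floats carry digits; apply the same
+        # rounding and representation as fields.float.digits_change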
+        if self._type == 'float':
+            if self.digits_compute:
+                self.digits = self.digits_compute(cr)
+            if self.digits:
+                precision, scale = self.digits
+                self._symbol_set = ('%s', lambda x: float_repr(float_round(__builtin__.float(x or 0.0),
+                                                                           precision_digits=scale),
+                                                               precision_digits=scale))
 
     def search(self, cr, uid, obj, name, args, context=None):
         if not self._fnct_search:
@@ -1016,10 +1105,10 @@ class function(_column):
                 result = (value, dict_names[value])
 
         if field_type == 'binary':
-            if context.get('bin_size', False):
+            if context.get('bin_size'):
                 # client requests only the size of binary fields
                 result = get_nice_size(value)
-            else:
+            elif not context.get('bin_raw'):
                 result = sanitize_binary_value(value)
 
         if field_type in ("integer","integer_big") and value > xmlrpclib.MAXINT:
@@ -1066,17 +1155,17 @@ class related(function):
     def _fnct_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
         self._field_get2(cr, uid, obj, context)
         i = len(self._arg)-1
-        sarg = name
+        sarg = name if isinstance(name, (list, tuple)) else [name]
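+        # normalize the searched value into a list so a single 'in' operator
+        # handles scalar and list/tuple inputs alike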
         while i>0:
-            if type(sarg) in [type([]), type( (1,) )]:
-                where = [(self._arg[i], 'in', sarg)]
-            else:
-                where = [(self._arg[i], '=', sarg)]
             if domain:
                 where = map(lambda x: (self._arg[i],x[1], x[2]), domain)
                 domain = []
+            else:
+                where = [(self._arg[i], 'in', sarg)]
             sarg = obj.pool.get(self._relations[i]['object']).search(cr, uid, where, context=context)
             i -= 1
+        if domain:   # happens if len(self._arg) == 1
+            return map(lambda x: (self._arg[0],x[1], x[2]), domain)
         return [(self._arg[0], 'in', sarg)]
 
     def _fnct_write(self,obj,cr, uid, ids, field_name, values, args, context=None):
@@ -1167,17 +1256,19 @@ class related(function):
     def _field_get2(self, cr, uid, obj, context=None):
         if self._relations:
             return
+        result = []
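+        # build the relation chain in a local list and assign it in one step,
+        # so self._relations is never observed partially populated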
         obj_name = obj._name
         for i in range(len(self._arg)):
             f = obj.pool.get(obj_name).fields_get(cr, uid, [self._arg[i]], context=context)[self._arg[i]]
-            self._relations.append({
+            result.append({
                 'object': obj_name,
                 'type': f['type']
 
             })
             if f.get('relation',False):
                 obj_name = f['relation']
-                self._relations[-1]['relation'] = f['relation']
+                result[-1]['relation'] = f['relation']
+        self._relations = result
 
 
class sparse(function):
@@ -1210,23 +1301,23 @@ class sparse(function):
         """
 
         if self._type == 'many2many':
-            #NOTE only the option (0, 0,  { values }) is supported for many2many
-            if value[0][0] == 6:
-                return value[0][2]
-            
+            assert value[0][0] == 6, 'Unsupported m2m value for sparse field: %s' % value
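+            # (6, 0, ids) is the "replace all links" command: the ids list is
+            # stored directly in the serialized dict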
+            return value[0][2]
+
         elif self._type == 'one2many':
             if not read_value:
-                read_value=[]
+                read_value = []
             relation_obj = obj.pool.get(self.relation)
             for vals in value:
+                assert vals[0] in (0,1,2), 'Unsupported o2m value for sparse field: %s' % vals
                 if vals[0] == 0:
                     read_value.append(relation_obj.create(cr, uid, vals[2], context=context))
                 elif vals[0] == 1:
                     relation_obj.write(cr, uid, vals[1], vals[2], context=context)
                 elif vals[0] == 2:
-                    relation_obj.unlink(cr, uid, vals[1])
+                    relation_obj.unlink(cr, uid, vals[1], context=context)
                     read_value.remove(vals[1])
-            return read_value        
+            return read_value
         return value
 
 
@@ -1236,42 +1327,45 @@ class sparse(function):
         records = obj.browse(cr, uid, ids, context=context)
         for record in records:
             # grab serialized value as object - already deserialized
-            serialized = record.__getattr__(self.serialization_field)
-            # we have to delete the key in the json when the value is null
+            serialized = getattr(record, self.serialization_field)
             if value is None:
-                if field_name in serialized:
-                    del serialized[field_name]
-                else:
-                    # nothing to do, we dont wan't to store the key with a null value
-                    continue
+                # simply delete the key to unset it.
+                serialized.pop(field_name, None)
             else: 
                 serialized[field_name] = self.convert_value(obj, cr, uid, record, value, serialized.get(field_name), context=context)
             obj.write(cr, uid, ids, {self.serialization_field: serialized}, context=context)
         return True
 
     def _fnct_read(self, obj, cr, uid, ids, field_names, args, context=None):
-        results={}
+        results = {}
         records = obj.browse(cr, uid, ids, context=context)
         for record in records:
             # grab serialized value as object - already deserialized
-            serialized = record.__getattr__(self.serialization_field)
-            results[record.id] ={}
+            serialized = getattr(record, self.serialization_field)
+            results[record.id] = {}
             for field_name in field_names:
-                if obj._columns[field_name]._type in ['one2many']:
-                    results[record.id].update({field_name : serialized.get(field_name, [])})
-                else:
-                    results[record.id].update({field_name : serialized.get(field_name)})
+                field_type = obj._columns[field_name]._type
+                value = serialized.get(field_name, False)
+                if field_type in ('one2many','many2many'):
+                    value = value or []
+                    if value:
+                        # filter out deleted records as superuser
+                        relation_obj = obj.pool.get(obj._columns[field_name].relation)
+                        value = relation_obj.exists(cr, openerp.SUPERUSER_ID, value)
+                if type(value) in (int,long) and field_type == 'many2one':
+                    relation_obj = obj.pool.get(obj._columns[field_name].relation)
+                    # check for deleted record as superuser
+                    if not relation_obj.exists(cr, openerp.SUPERUSER_ID, [value]):
+                        value = False
+                results[record.id][field_name] = value
         return results
 
     def __init__(self, serialization_field, **kwargs):
         self.serialization_field = serialization_field
-        #assert serialization_field._type == 'serialized'
-        return super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='_json_multi', method=True, **kwargs)
+        return super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='__sparse_multi', **kwargs)
      
 
 
-
-
 # ---------------------------------------------------------
 # Dummy fields
 # ---------------------------------------------------------
@@ -1302,10 +1396,10 @@ class serialized(_column):
     """
     
     def _symbol_set_struct(val):
-        return json.dumps(val)
+        return simplejson.dumps(val)
 
     def _symbol_get_struct(self, val):
-        return json.loads(val or '{}')
+        return simplejson.loads(val or '{}')
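+    # e.g. (illustrative) _symbol_set_struct({'a': 1}) returns '{"a": 1}';
+    # the whole mapping is persisted as JSON text in the database column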
     
     _prefetch = False
     _type = 'serialized'