[MERGE] OPW 578099: ir.filters should be translated according to the user language
[odoo/odoo.git] / bin / osv / osv.py
index 369f006..d9d7b35 100644
@@ -1,31 +1,23 @@
-# -*- encoding: utf-8 -*-
+# -*- coding: utf-8 -*-
 ##############################################################################
 #
-# Copyright (c) 2004-2008 Tiny SPRL (http://tiny.be) All Rights Reserved.
+#    OpenERP, Open Source Management Solution
+#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
 #
-# $Id$
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU Affero General Public License as
+#    published by the Free Software Foundation, either version 3 of the
+#    License, or (at your option) any later version.
 #
-# WARNING: This program as such is intended to be used by professional
-# programmers who take the whole responsability of assessing all potential
-# consequences resulting from its eventual inadequacies and bugs
-# End users who are looking for a ready-to-use solution with commercial
-# garantees and support are strongly adviced to contract a Free Software
-# Service Company
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU Affero General Public License for more details.
 #
-# This program is Free Software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
+#    You should have received a copy of the GNU Affero General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
-###############################################################################
+##############################################################################
 
 #
 # OSV: Objects Services
@@ -35,11 +27,10 @@ import orm
 import netsvc
 import pooler
 import copy
-import sys
-
-import psycopg
-from netsvc import Logger, LOG_ERROR
-from tools.misc import UpdateableDict
+import logging
+from psycopg2 import IntegrityError, errorcodes
+from tools.func import wraps
+from tools.translate import translate
 
 module_list = []
 module_class_list = {}
@@ -50,56 +41,141 @@ class except_osv(Exception):
         self.name = name
         self.exc_type = exc_type
         self.value = value
-        self.args = (exc_type,name)
+        self.args = (exc_type, name)
 
-class osv_pool(netsvc.Service):
 
+class object_proxy(netsvc.Service):
     def __init__(self):
-        self.obj_pool = {}
-        self.module_object_list = {}
-        self.created = []
-        self._sql_error = {}
-        self._store_function = {}
+        self.logger = logging.getLogger('web-services')
         netsvc.Service.__init__(self, 'object_proxy', audience='')
-        self.joinGroup('web-services')
-        self.exportMethod(self.exportedMethods)
-        self.exportMethod(self.obj_list)
         self.exportMethod(self.exec_workflow)
         self.exportMethod(self.execute)
-        self.exportMethod(self.execute_cr)
+
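+    # Decorator for the public service methods below (execute, exec_workflow):
+    # normalises ORM/OSV errors and IntegrityErrors into abortResponse() calls
+    # and, when the caller's context carries a language, translates the error
+    # message before it is sent back to the client.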
+    def check(f):
+        @wraps(f)
+        def wrapper(self, dbname, *args, **kwargs):
+            """ Wraps around OSV functions and normalises a few exceptions
+            """
+
+            def tr(src, ttype):
+                # We try to do the same as _(), but without the frame
+                # inspection, since we are already wrapping an osv function.
+                # trans_obj = self.get('ir.translation') cannot work yet :(
+                ctx = {}
+                if not kwargs:
+                    if args and isinstance(args[-1], dict):
+                        ctx = args[-1]
+                elif isinstance(kwargs, dict):
+                    ctx = kwargs.get('context', {})
+
+                uid = 1
+                if args and isinstance(args[0], (long, int)):
+                    uid = args[0]
+
+                lang = ctx and ctx.get('lang')
+                if not (lang or hasattr(src, '__call__')):
+                    return src
+
+                # We open a *new* cursor here; one reason is that failed SQL
+                # queries (as in IntegrityError) will invalidate the current one.
+                cr = False
+                
+                if hasattr(src, '__call__'):
+                    # src is callable: we need to find the right parameters to
+                    # call the orm._sql_message(self, cr, uid, ids, context)
+                    # function, or we skip.
+                    # Our signature is f(osv_pool, dbname [,uid, obj, method, args])
+                    try:
+                        if args and len(args) > 1:
+                            obj = self.get(args[1])
+                            if len(args) > 3 and isinstance(args[3], (long, int, list)):
+                                ids = args[3]
+                            else:
+                                ids = []
+                        cr = pooler.get_db_only(dbname).cursor()
+                        return src(obj, cr, uid, ids, context=(ctx or {}))
+                    except Exception:
+                        pass
+                    finally:
+                        if cr: cr.close()
+
+                    return False # so that the original SQL error will
+                                 # be returned; it is the best we have.
+
+                try:
+                    cr = pooler.get_db_only(dbname).cursor()
+                    res = translate(cr, name=False, source_type=ttype,
+                                    lang=lang, source=src)
+                    if res:
+                        return res
+                    else:
+                        return src
+                finally:
+                    if cr: cr.close()
+
+            def _(src):
+                return tr(src, 'code')
+
+            try:
+                if not pooler.get_pool(dbname)._ready:
+                    raise except_osv('Database not ready', 'Currently, this database is not fully loaded and can not be used.')
+                return f(self, dbname, *args, **kwargs)
+            except orm.except_orm, inst:
+                if inst.name == 'AccessError':
+                    self.logger.debug("AccessError", exc_info=True)
+                self.abortResponse(1, inst.name, 'warning', inst.value)
+            except except_osv, inst:
+                self.abortResponse(1, inst.name, inst.exc_type, inst.value)
+            except IntegrityError, inst:
+                osv_pool = pooler.get_pool(dbname)
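+                # Check first whether a custom message was registered for this
+                # constraint (typically via _sql_constraints) before falling
+                # back to the generic integrity-error texts below.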
+                for key in osv_pool._sql_error.keys():
+                    if key in inst[0]:
+                        self.abortResponse(1, _('Constraint Error'), 'warning',
+                                        tr(osv_pool._sql_error[key], 'sql_constraint') or inst[0])
+                if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION, errorcodes.FOREIGN_KEY_VIOLATION, errorcodes.RESTRICT_VIOLATION):
+                    msg = _('The operation cannot be completed, probably due to the following:\n- deletion: you may be trying to delete a record while other records still reference it\n- creation/update: a mandatory field is not correctly set')
+                    self.logger.debug("IntegrityError", exc_info=True)
+                    try:
+                        errortxt = inst.pgerror.replace('«','"').replace('»','"')
+                        if '"public".' in errortxt:
+                            context = errortxt.split('"public".')[1]
+                            model_name = table = context.split('"')[1]
+                        else:
+                            last_quote_end = errortxt.rfind('"')
+                            last_quote_begin = errortxt.rfind('"', 0, last_quote_end)
+                            model_name = table = errortxt[last_quote_begin+1:last_quote_end].strip()
+                        model = table.replace("_",".")
+                        model_obj = osv_pool.get(model)
+                        if model_obj:
+                            model_name = model_obj._description or model_obj._name
+                        msg += _('\n\n[object with reference: %s - %s]') % (model_name, model)
+                    except Exception:
+                        pass
+                    self.abortResponse(1, _('Integrity Error'), 'warning', msg)
+                else:
+                    self.abortResponse(1, _('Integrity Error'), 'warning', inst[0])
+            except Exception:
+                self.logger.exception("Uncaught exception")
+                raise
+
+        return wrapper
 
     def execute_cr(self, cr, uid, obj, method, *args, **kw):
-        try:
-            object = pooler.get_pool(cr.dbname).get(obj)
-            if not object:
-                self.abortResponse(1, 'Object Error', 'warning',
-                'Object %s doesn\'t exist' % str(obj))
-            return getattr(object,method)(cr, uid, *args, **kw)
-        except orm.except_orm, inst:
-            self.abortResponse(1, inst.name, 'warning', inst.value)
-        except except_osv, inst:
-            self.abortResponse(1, inst.name, inst.exc_type, inst.value)
-        except psycopg.IntegrityError, inst:
-            for key in self._sql_error.keys():
-                if key in inst[0]:
-                    self.abortResponse(1, 'Constraint Error', 'warning',
-                            self._sql_error[key])
-            self.abortResponse(1, 'Integrity Error', 'warning', inst[0])
-        except Exception, e:
-            import traceback
-            tb_s = reduce(lambda x, y: x+y, traceback.format_exception(
-                sys.exc_type, sys.exc_value, sys.exc_traceback))
-            logger = Logger()
-            logger.notifyChannel("web-services", LOG_ERROR,
-                    'Exception in call: ' + tb_s)
-            raise
+        object = pooler.get_pool(cr.dbname).get(obj)
+        if not object:
+            raise except_osv('Object Error', 'Object %s doesn\'t exist' % str(obj))
+        return getattr(object, method)(cr, uid, *args, **kw)
 
+    @check
     def execute(self, db, uid, obj, method, *args, **kw):
-        db, pool = pooler.get_db_and_pool(db)
-        cr = db.cursor()
+        cr = pooler.get_db(db).cursor()
         try:
             try:
-                res = pool.execute_cr(cr, uid, obj, method, *args, **kw)
+                if method.startswith('_'):
+                    raise except_osv('Access Denied', 'Private methods (such as %s) cannot be called remotely.' % (method,))
+                res = self.execute_cr(cr, uid, obj, method, *args, **kw)
+                if res is None:
+                    self.logger.warning('The method %s of the object %s can not return `None` !', method, obj)
                 cr.commit()
             except Exception:
                 cr.rollback()
@@ -112,29 +188,55 @@ class osv_pool(netsvc.Service):
         wf_service = netsvc.LocalService("workflow")
         return wf_service.trg_validate(uid, obj, args[0], method, cr)
 
+    @check
     def exec_workflow(self, db, uid, obj, method, *args):
         cr = pooler.get_db(db).cursor()
         try:
             try:
                 res = self.exec_workflow_cr(cr, uid, obj, method, *args)
                 cr.commit()
-            except orm.except_orm, inst:
-                cr.rollback()
-                self.abortResponse(1, inst.name, 'warning', inst.value)
-            except except_osv, inst:
+            except Exception:
                 cr.rollback()
-                self.abortResponse(1, inst.name, inst[0], inst.value)
+                raise
         finally:
             cr.close()
         return res
 
+object_proxy()
+
+class osv_pool(object):
+    def __init__(self):
+        self._ready = False
+        self.obj_pool = {}
+        self.module_object_list = {}
+        self.created = []
+        self._sql_error = {}
+        self._store_function = {}
+        self._init = True
+        self._init_parent = {}
+        self.logger = logging.getLogger("pool")
+
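+    # Switch the pool in or out of "init" (module installation/update) mode.
+    # When leaving init mode, the parent_store computations deferred during
+    # loading are run; the pool is marked as ready in either case.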
+    def init_set(self, cr, mode):
+        different = mode != self._init
+        if different:
+            if mode:
+                self._init_parent = {}
+            if not mode:
+                for o in self._init_parent:
+                    self.get(o)._parent_store_compute(cr)
+            self._init = mode
+
+        self._ready = True
+        return different
+
+
     def obj_list(self):
         return self.obj_pool.keys()
 
     # adds a new object instance to the object pool.
     # if it already existed, the instance is replaced
     def add(self, name, obj_inst):
-        if self.obj_pool.has_key(name):
+        if name in self.obj_pool:
             del self.obj_pool[name]
         self.obj_pool[name] = obj_inst
 
@@ -143,31 +245,25 @@ class osv_pool(netsvc.Service):
         module = module.split('.')[0][2:]
         self.module_object_list.setdefault(module, []).append(obj_inst)
 
+    # Return None if object does not exist
     def get(self, name):
         obj = self.obj_pool.get(name, None)
-# We cannot uncomment this line because it breaks initialisation since objects do not initialize
-# in the correct order and the ORM doesnt support correctly when some objets do not exist yet
-#       assert obj, "object %s does not exist !" % name
         return obj
 
     #TODO: pass a list of modules to load
     def instanciate(self, module, cr):
-#       print "module list:", module_list
-#       for module in module_list:
         res = []
         class_list = module_class_list.get(module, [])
-#           if module not in self.module_object_list:
-#       print "%s class_list:" % module, class_list
         for klass in class_list:
             res.append(klass.createInstance(self, module, cr))
         return res
-#           else:
-#               print "skipping module", module
 
-#pooler.get_pool(cr.dbname) = osv_pool()
+class osv_base(object):
+    def __init__(self, pool, cr):
+        pool.add(self._name, self)
+        self.pool = pool
+        super(osv_base, self).__init__(cr)
 
-class osv_memory(orm.orm_memory):
-    #__metaclass__ = inheritor
     def __new__(cls):
         module = str(cls)[6:]
         module = module[:len(module)-1]
@@ -180,183 +276,122 @@ class osv_memory(orm.orm_memory):
             module_list.append(cls._module)
         return None
 
+class osv_memory(osv_base, orm.orm_memory):
     #
     # Goal: try to apply inheritancy at the instanciation level and
     #       put objects in the pool var
     #
     def createInstance(cls, pool, module, cr):
-        name = hasattr(cls,'_name') and cls._name or cls._inherit
-        parent_name = hasattr(cls, '_inherit') and cls._inherit
-        if parent_name:
-            print 'Inherit not supported in osv_memory object !'
+        parent_names = getattr(cls, '_inherit', None)
+        if parent_names:
+            if isinstance(parent_names, (str, unicode)):
+                name = cls._name or parent_names
+                parent_names = [parent_names]
+            else:
+                name = cls._name
+            if not name:
+                raise TypeError('_name is mandatory in case of multiple inheritance')
+
+            for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
+                parent_class = pool.get(parent_name).__class__
+                assert pool.get(parent_name), "parent class %s does not exist in module %s !" % (parent_name, module)
+                nattr = {}
+                for s in ('_columns', '_defaults'):
+                    new = copy.copy(getattr(pool.get(parent_name), s))
+                    if s == '_columns':
+                        # Don't _inherit custom fields.
+                        for c in new.keys():
+                            if new[c].manual:
+                                del new[c]
+                        # Duplicate float fields because they have a .digits
+                        # cache (which must be per-pool, not server-wide).
+                        for c in new.keys():
+                            if new[c]._type == 'float':
+                                new[c] = copy.copy(new[c])
+                    if hasattr(new, 'update'):
+                        new.update(cls.__dict__.get(s, {}))
+                    else:
+                        new.extend(cls.__dict__.get(s, []))
+                    nattr[s] = new
+                cls = type(name, (cls, parent_class), nattr)
+        else:
+            # Duplicate float fields because they have a .digits
+            # cache (which must be per-pool, not server-wide).
+            for field_name, field in cls._columns.items():
+                if field._type == 'float':
+                    cls._columns[field_name] = copy.copy(field)
+
         obj = object.__new__(cls)
         obj.__init__(pool, cr)
         return obj
     createInstance = classmethod(createInstance)
 
-    def __init__(self, pool, cr):
-        pool.add(self._name, self)
-        self.pool = pool
-        orm.orm_memory.__init__(self, cr)
-
-
-
-class osv(orm.orm):
-    #__metaclass__ = inheritor
-    def __new__(cls):
-        module = str(cls)[6:]
-        module = module[:len(module)-1]
-        module = module.split('.')[0][2:]
-        if not hasattr(cls, '_module'):
-            cls._module = module
-        module_class_list.setdefault(cls._module, []).append(cls)
-        class_pool[cls._name] = cls
-        if module not in module_list:
-            module_list.append(cls._module)
-        return None
-
+class osv(osv_base, orm.orm):
     #
     # Goal: try to apply inheritancy at the instanciation level and
     #       put objects in the pool var
     #
     def createInstance(cls, pool, module, cr):
-        parent_name = hasattr(cls, '_inherit') and cls._inherit
-        if parent_name:
-            parent_class = pool.get(parent_name).__class__
-            assert parent_class, "parent class %s does not exist !" % parent_name
-            nattr = {}
-            for s in ('_columns', '_defaults', '_inherits', '_constraints', '_sql_constraints'):
-                new = copy.copy(getattr(pool.get(parent_name), s))
-                if hasattr(new, 'update'):
-                    new.update(cls.__dict__.get(s, {}))
-                else:
-                    new.extend(cls.__dict__.get(s, []))
-                nattr[s] = new
-            name = hasattr(cls,'_name') and cls._name or cls._inherit
-            cls = type(name, (cls, parent_class), nattr)
+        parent_names = getattr(cls, '_inherit', None)
+        if parent_names:
+            if isinstance(parent_names, (str, unicode)):
+                name = cls._name or parent_names
+                parent_names = [parent_names]
+            else:
+                name = cls._name
+            if not name:
+                raise TypeError('_name is mandatory in case of multiple inheritance')
+
+            for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
+                parent_class = pool.get(parent_name).__class__
+                assert pool.get(parent_name), "parent class %s does not exist in module %s !" % (parent_name, module)
+                nattr = {}
+                for s in ('_columns', '_defaults', '_inherits', '_constraints', '_sql_constraints'):
+                    new = copy.copy(getattr(pool.get(parent_name), s))
+                    if s == '_columns':
+                        # Don't _inherit custom fields.
+                        for c in new.keys():
+                            if new[c].manual:
+                                del new[c]
+                        # Duplicate float fields because they have a .digits
+                        # cache (which must be per-pool, not server-wide).
+                        for c in new.keys():
+                            if new[c]._type == 'float':
+                                new[c] = copy.copy(new[c])
+
+                    if hasattr(new, 'update'):
+                        new.update(cls.__dict__.get(s, {}))
+                    else:
+                        if s=='_constraints':
+                            for c in cls.__dict__.get(s, []):
+                                exist = False
+                                for c2 in range(len(new)):
+                                    # For _constraints, we should check fields and methods as well
+                                    if new[c2][2]==c[2] and (new[c2][0] == c[0] \
+                                            or getattr(new[c2][0],'__name__', True) == \
+                                                getattr(c[0],'__name__', False)):
+                                        # If new class defines a constraint with
+                                        # same function name, we let it override
+                                        # the old one.
+                                        new[c2] = c
+                                        exist = True
+                                        break
+                                if not exist:
+                                    new.append(c)
+                        else:
+                            new.extend(cls.__dict__.get(s, []))
+                    nattr[s] = new
+                cls = type(name, (cls, parent_class), nattr)
+        else:
+            # Duplicate float fields because they have a .digits
+            # cache (which must be per-pool, not server-wide).
+            for field_name, field in cls._columns.items():
+                if field._type == 'float':
+                    cls._columns[field_name] = copy.copy(field)
         obj = object.__new__(cls)
         obj.__init__(pool, cr)
         return obj
     createInstance = classmethod(createInstance)
 
-    def __init__(self, pool, cr):
-        pool.add(self._name, self)
-        self.pool = pool
-        orm.orm.__init__(self, cr)
-
-class Cacheable(object):
-
-    _cache = UpdateableDict()
-
-    def add(self, key, value):
-        self._cache[key] = value
-
-    def invalidate(self, key):
-        del self._cache[key]
-
-    def get(self, key):
-        try:
-            w = self._cache[key]
-            return w
-        except KeyError:
-            return None
-
-    def clear(self):
-        self._cache.clear()
-        self._items = []
-
-def filter_dict(d, fields):
-    res = {}
-    for f in fields + ['id']:
-        if f in d:
-            res[f] = d[f]
-    return res
-
-class cacheable_osv(osv, Cacheable):
-
-    _relevant = ['lang']
-
-    def __init__(self):
-        super(cacheable_osv, self).__init__()
-
-    def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
-        if not fields:
-            fields=[]
-        if not context:
-            context={}
-        fields = fields or self._columns.keys()
-        ctx = [context.get(x, False) for x in self._relevant]
-        result, tofetch = [], []
-        for id in ids:
-            res = self.get(self._name, id, ctx)
-            if not res:
-                tofetch.append(id)
-            else:
-                result.append(filter_dict(res, fields))
-
-        # gen the list of "local" (ie not inherited) fields which are classic or many2one
-        nfields = filter(lambda x: x[1]._classic_write, self._columns.items())
-        # gen the list of inherited fields
-        inherits = map(lambda x: (x[0], x[1][2]), self._inherit_fields.items())
-        # complete the field list with the inherited fields which are classic or many2one
-        nfields += filter(lambda x: x[1]._classic_write, inherits)
-        nfields = [x[0] for x in nfields]
-
-        res = super(cacheable_osv, self).read(cr, user, tofetch, nfields, context, load)
-        for r in res:
-            self.add((self._name, r['id'], ctx), r)
-            result.append(filter_dict(r, fields))
-
-        # Call function fields if necessary
-        tofetch = []
-        for f in fields:
-            if f not in nfields:
-                tofetch.append(f)
-        for f in tofetch:
-            fvals = self._columns[f].get(cr, self, ids, f, user, context=context)
-            for r in result:
-                r[f] = fvals[r['id']]
-
-        # TODO: sort by self._order !!
-        return result
-
-    def invalidate(self, key):
-        del self._cache[key[0]][key[1]]
-
-    def write(self, cr, user, ids, values, context=None):
-        if not context:
-            context={}
-        for id in ids:
-            self.invalidate((self._name, id))
-        return super(cacheable_osv, self).write(cr, user, ids, values, context)
-
-    def unlink(self, cr, user, ids):
-        self.clear()
-        return super(cacheable_osv, self).unlink(cr, user, ids)
-
-#cacheable_osv = osv
-
-
-#class FakePool(object):
-#   def __init__(self, module):
-#       self.preferred_module = module
-
-#   def get(self, name):
-#       localpool = module_objects_dict.get(self.preferred_module, {'dict': {}})['dict']
-#       if name in localpool:
-#           obj = localpool[name]
-#       else:
-#           obj = pooler.get_pool(cr.dbname).get(name)
-#       return obj
-
-#       fake_pool = self
-#       class fake_class(obj.__class__):
-#           def __init__(self):
-#               super(fake_class, self).__init__()
-#               self.pool = fake_pool
-
-#       return fake_class()
-
-
 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
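
For reference, the snippet below is a minimal standalone sketch (not part of the patch; TRANSLATIONS, translate_stub and the ValueError stand-in are hypothetical) of the pattern introduced by the check decorator above: wrap a service call, look the error text up in the caller's language, and fall back to the original string when no translation is found.

    from functools import wraps

    # Stand-in for the ir.translation lookup done by tools.translate.translate()
    TRANSLATIONS = {
        ('fr_FR', 'Integrity Error'): u"Erreur d'int\xe9grit\xe9",
    }

    def translate_stub(lang, source):
        return TRANSLATIONS.get((lang, source))

    def check(f):
        @wraps(f)
        def wrapper(dbname, *args, **kwargs):
            lang = (kwargs.get('context') or {}).get('lang')
            try:
                return f(dbname, *args, **kwargs)
            except ValueError as exc:   # stands in for IntegrityError / except_osv
                msg = translate_stub(lang, str(exc)) or str(exc)
                raise ValueError(msg)
        return wrapper

    @check
    def execute(dbname, uid, obj, method, context=None):
        raise ValueError('Integrity Error')

    # execute('db', 1, 'ir.filters', 'read', context={'lang': 'fr_FR'})
    # -> ValueError: Erreur d'intégrité

The real decorator additionally handles callable _sql_message entries and opens a fresh cursor for the translation lookup, since a failed statement invalidates the cursor of the current transaction.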