[FIX] rights to unlink res.partner, and restrict Knowledge/Configuration/Document...
[odoo/odoo.git] / addons / document / document.py
index b4c5ee5..69e123b 100644
-# -*- encoding: utf-8 -*-
+# -*- coding: utf-8 -*-
 ##############################################################################
 #
 #    OpenERP, Open Source Management Solution
-#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
-#    $Id$
+#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
 #
 #    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
+#    it under the terms of the GNU Affero General Public License as
+#    published by the Free Software Foundation, either version 3 of the
+#    License, or (at your option) any later version.
 #
 #    This program is distributed in the hope that it will be useful,
 #    but WITHOUT ANY WARRANTY; without even the implied warranty of
 #    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
+#    GNU Affero General Public License for more details.
 #
-#    You should have received a copy of the GNU General Public License
+#    You should have received a copy of the GNU Affero General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #
 ##############################################################################
 
 import base64
-
 from osv import osv, fields
-from osv.orm import except_orm
-import urlparse
-
 import os
 
-import pooler
-from content_index import content_index
-import netsvc
-import StringIO
-
-import random
-import string
-from psycopg2 import Binary
-from tools import config
+# from psycopg2 import Binary
+# from tools import config
 import tools
 from tools.translate import _
+import nodes
+import logging
 
-def random_name():
-    random.seed()
-    d = [random.choice(string.ascii_letters) for x in xrange(10) ]
-    name = "".join(d)
-    return name
-
+DMS_ROOT_PATH = tools.config.get('document_path', os.path.join(tools.config['root_path'], 'filestore'))
 
-# Unsupported WebDAV Commands:
-#     label
-#     search
-#     checkin
-#     checkout
-#     propget
-#     propset
-
-#
-# An object that represent an uri
-#   path: the uri of the object
-#   content: the Content it belongs to (_print.pdf)
-#   type: content or collection
-#       content: objct = res.partner
-#       collection: object = directory, object2 = res.partner
-#       file: objct = ir.attachement
-#   root: if we are at the first directory of a ressource
-#
-INVALID_CHARS={'*':str(hash('*')), '|':str(hash('|')) , "\\":str(hash("\\")), '/':'__', ':':str(hash(':')), '"':str(hash('"')), '<':str(hash('<')) , '>':str(hash('>')) , '?':str(hash('?'))}
-
-
-class node_class(object):
-    def __init__(self, cr, uid, path, object, object2=False, context={}, content=False, type='collection', root=False):
-        self.cr = cr
-        self.uid = uid
-        self.path = path
-        self.object = object
-        self.object2 = object2
-        self.context = context
-        self.content = content
-        self.type=type
-        self.root=root
-
-    def _file_get(self, nodename=False):
-        if not self.object:
-            return []
-        pool = pooler.get_pool(self.cr.dbname)
-        fobj = pool.get('ir.attachment')
-        res2 = []
-        where = []
-        if self.object2:
-            where.append( ('res_model','=',self.object2._name) )
-            where.append( ('res_id','=',self.object2.id) )
-        else:
-            where.append( ('parent_id','=',self.object.id) )
-            where.append( ('res_id','=',False) )
-        if nodename:
-            where.append( (fobj._rec_name,'=',nodename) )
-        for content in self.object.content_ids:
-            if self.object2 or not content.include_name:
-                if content.include_name:
-                    content_name = self.object2.name
-                    obj = pool.get(self.object.ressource_type_id.model)
-                    name_for = obj._name.split('.')[-1]            
-                    if content_name  and content_name.find(name_for) == 0  :
-                        content_name = content_name.replace(name_for,'')
-                    test_nodename = content_name + (content.suffix or '') + (content.extension or '')
-                else:
-                    test_nodename = (content.suffix or '') + (content.extension or '')
-                if test_nodename.find('/'):
-                    test_nodename=test_nodename.replace('/', '_')
-                path = self.path+'/'+test_nodename
-                if not nodename:
-                    n = node_class(self.cr, self.uid,path, self.object2, False, context=self.context, content=content, type='content', root=False)
-                    res2.append( n)
-                else:
-                    if nodename == test_nodename:
-                        n = node_class(self.cr, self.uid, path, self.object2, False, context=self.context, content=content, type='content', root=False)
-                        res2.append(n)
-
-        ids = fobj.search(self.cr, self.uid, where+[ ('parent_id','=',self.object and self.object.id or False) ])
-        if self.object and self.root and (self.object.type=='ressource'):
-            ids += fobj.search(self.cr, self.uid, where+[ ('parent_id','=',False) ])
-        res = fobj.browse(self.cr, self.uid, ids, context=self.context)
-        return map(lambda x: node_class(self.cr, self.uid, self.path+'/'+eval('x.'+fobj._rec_name), x, False, context=self.context, type='file', root=False), res) + res2
-    
-    def get_translation(self,value,lang):
-        result = value
-        pool = pooler.get_pool(self.cr.dbname)        
-        translation_ids = pool.get('ir.translation').search(self.cr, self.uid, [('value','=',value),('lang','=',lang),('type','=','model')])
-        if len(translation_ids):
-            tran_id = translation_ids[0]
-            translation = pool.get('ir.translation').read(self.cr, self.uid, tran_id, ['res_id','name'])
-            res_model,field_name = tuple(translation['name'].split(','))  
-            res_id = translation['res_id']        
-            res = pool.get(res_model).read(self.cr, self.uid, res_id, [field_name])
-            if res:
-                result = res[field_name]
-        return result 
-    
-    def directory_list_for_child(self,nodename,parent=False):
-        pool = pooler.get_pool(self.cr.dbname)
-        where = []
-        if nodename:    
-            nodename = self.get_translation(nodename, self.context['lang'])            
-            where.append(('name','=',nodename))
-        if (self.object and self.object.type=='directory') or not self.object2:
-            where.append(('parent_id','=',self.object and self.object.id or False))
-        else:
-            where.append(('parent_id','=',False))
-        if self.object:
-            where.append(('ressource_parent_type_id','=',self.object.ressource_type_id.id))
-        else:
-            where.append(('ressource_parent_type_id','=',False))
-
-        ids = pool.get('document.directory').search(self.cr, self.uid, where+[('ressource_id','=',0)])
-        if self.object2:
-            ids += pool.get('document.directory').search(self.cr, self.uid, where+[('ressource_id','=',self.object2.id)])        
-        res = pool.get('document.directory').browse(self.cr, self.uid, ids, self.context)
-        return res
-
-    def _child_get(self, nodename=False):        
-        if self.type not in ('collection','database'):
-            return []
-        res = self.directory_list_for_child(nodename)
-        result= map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name, x, x.type=='directory' and self.object2 or False, context=self.context, root=self.root), res)
-        if self.type=='database':
-            pool = pooler.get_pool(self.cr.dbname)
-            fobj = pool.get('ir.attachment')
-            vargs = [('parent_id','=',False),('res_id','=',False)]
-            if nodename:
-                vargs.append((fobj._rec_name,'=',nodename))
-            file_ids=fobj.search(self.cr,self.uid,vargs)
-
-            res = fobj.browse(self.cr, self.uid, file_ids, context=self.context)
-            result +=map(lambda x: node_class(self.cr, self.uid, self.path+'/'+eval('x.'+fobj._rec_name), x, False, context=self.context, type='file', root=self.root), res)
-        if self.type=='collection' and self.object.type=="ressource":
-            where = self.object.domain and eval(self.object.domain, {'active_id':self.root}) or []
-            pool = pooler.get_pool(self.cr.dbname)            
-            obj = pool.get(self.object.ressource_type_id.model)
-            _dirname_field = obj._rec_name
-            if len(obj.fields_get(self.cr, self.uid, ['dirname'])):
-                _dirname_field = 'dirname'            
-
-            name_for = obj._name.split('.')[-1]            
-            if nodename  and nodename.find(name_for) == 0  :
-                id = int(nodename.replace(name_for,''))
-                where.append(('id','=',id))
-            elif nodename:
-                if nodename.find('__') :
-                    nodename=nodename.replace('__','/')
-                for invalid in INVALID_CHARS:
-                    if nodename.find(INVALID_CHARS[invalid]) :
-                        nodename=nodename.replace(INVALID_CHARS[invalid],invalid)
-                nodename = self.get_translation(nodename, self.context['lang'])
-                where.append((_dirname_field,'=',nodename))
-
-            if self.object.ressource_tree:
-                if obj._parent_name in obj.fields_get(self.cr,self.uid):                    
-                    where.append((obj._parent_name,'=',self.object2 and self.object2.id or False))
-                    ids = obj.search(self.cr, self.uid, where)
-                    res = obj.browse(self.cr, self.uid, ids,self.context)
-                    result+= map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name.replace('/','__'), self.object, x, context=self.context, root=x.id), res)
-                    return result
-                else :
-                    if self.object2:
-                        return result
-            else:
-                if self.object2:
-                    return result
-
-            
-            ids = obj.search(self.cr, self.uid, where)
-            res = obj.browse(self.cr, self.uid, ids,self.context)
-            for r in res:                               
-                if len(obj.fields_get(self.cr, self.uid, [_dirname_field])):
-                    r.name = eval('r.'+_dirname_field)
-                else:
-                    r.name = False
-                if not r.name:
-                    r.name = name_for + '%d'%r.id               
-                for invalid in INVALID_CHARS:
-                    if r.name.find(invalid) :
-                        r.name = r.name.replace(invalid,INVALID_CHARS[invalid])            
-            result2 = map(lambda x: node_class(self.cr, self.uid, self.path+'/'+x.name.replace('/','__'), self.object, x, context=self.context, root=x.id), res)
-            if result2:
-                if self.object.ressource_tree:
-                    result += result2
-                else:
-                    result = result2                  
-        return result
-
-    def children(self):
-        return self._child_get() + self._file_get()
-
-    def child(self, name):
-        res = self._child_get(name)
-        if res:
-            return res[0]
-        res = self._file_get(name)
-        if res:
-            return res[0]
-        return None
-
-    def path_get(self):
-        path = self.path
-        if self.path[0]=='/':
-            path = self.path[1:]
-        return path
-
-class document_directory(osv.osv):
-    _name = 'document.directory'
-    _description = 'Document directory'
-    _columns = {
-        'name': fields.char('Name', size=64, required=True, select=1, translate=True),
-        'write_date': fields.datetime('Date Modified', readonly=True),
-        'write_uid':  fields.many2one('res.users', 'Last Modification User', readonly=True),
-        'create_date': fields.datetime('Date Created', readonly=True),
-        'create_uid':  fields.many2one('res.users', 'Creator', readonly=True),
-        'file_type': fields.char('Content Type', size=32),
-        'domain': fields.char('Domain', size=128, help="Use a domain if you want to apply an automatic filter on visible resources."),
-        'user_id': fields.many2one('res.users', 'Owner'),
-        'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'),
-        'parent_id': fields.many2one('document.directory', 'Parent Item'),
-        'child_ids': fields.one2many('document.directory', 'parent_id', 'Children'),
-        'file_ids': fields.one2many('ir.attachment', 'parent_id', 'Files'),
-        'content_ids': fields.one2many('document.directory.content', 'directory_id', 'Virtual Files'),
-        'type': fields.selection([('directory','Static Directory'),('ressource','Other Resources')], 'Type', required=True),
-        'ressource_type_id': fields.many2one('ir.model', 'Directories Mapped to Objects',
-            help="Select an object here and Open ERP will create a mapping for each of these " \
-                 "objects, using the given domain, when browsing through FTP."),
-        'ressource_parent_type_id': fields.many2one('ir.model', 'Parent Model',
-            help="If you put an object here, this directory template will appear bellow all of these objects. " \
-                 "Don't put a parent directory if you select a parent model."),
-        'ressource_id': fields.integer('Resource ID'),
-        'ressource_tree': fields.boolean('Tree Structure',
-            help="Check this if you want to use the same tree structure as the object selected in the system."),
-    }
-    _defaults = {
-        'user_id': lambda self,cr,uid,ctx: uid,
-        'domain': lambda self,cr,uid,ctx: '[]',
-        'type': lambda *args: 'directory',
-        'ressource_id': lambda *a: 0
-    }
-    _sql_constraints = [
-        ('dirname_uniq', 'unique (name,parent_id,ressource_id,ressource_parent_type_id)', 'The directory name must be unique !')
-    ]
-
-    def get_resource_path(self,cr,uid,dir_id,res_model,res_id):
-        # this method will be used in process module
-        # to be need test and Improvement if resource dir has parent resource (link resource)
-        path=[]
-        def _parent(dir_id,path):
-            parent=self.browse(cr,uid,dir_id)
-            if parent.parent_id and not parent.ressource_parent_type_id:
-                _parent(parent.parent_id.id,path)
-                path.append(parent.name)
-            else:
-                path.append(parent.name)
-                return path
-
-        directory=self.browse(cr,uid,dir_id)
-        model_ids=self.pool.get('ir.model').search(cr,uid,[('model','=',res_model)])
-        if directory:
-            _parent(dir_id,path)
-            path.append(self.pool.get(directory.ressource_type_id.model).browse(cr,uid,res_id).name)
-            user=self.pool.get('res.users').browse(cr,uid,uid)
-            return "ftp://%s:%s@localhost:%s/%s/%s"%(user.login,user.password,config.get('ftp_server_port',8021),cr.dbname,'/'.join(path))
-        return False
-
-    def _check_recursion(self, cr, uid, ids):
-        level = 100
-        while len(ids):
-            cr.execute('select distinct parent_id from document_directory where id in ('+','.join(map(str,ids))+')')
-            ids = filter(None, map(lambda x:x[0], cr.fetchall()))
-            if not level:
-                return False
-            level -= 1
-        return True
+class document_file(osv.osv):
+    _inherit = 'ir.attachment'
+    _rec_name = 'datas_fname'
 
-    _constraints = [
-        (_check_recursion, 'Error! You can not create recursive Directories.', ['parent_id'])
-    ]
-    def __init__(self, *args, **kwargs):
-        res = super(document_directory, self).__init__(*args, **kwargs)
-        self._cache = {}
-
-    def onchange_content_id(self, cr, uid, ids, ressource_type_id):
-        return {}
-
-    def _get_childs(self, cr, uid, node, nodename=False, context={}):
-        where = []
-        if nodename:
-            nodename = self.get_translation(nodename, self.context['lang'])
-            where.append(('name','=',nodename))
-        if object:
-            where.append(('parent_id','=',object.id))
-        ids = self.search(cr, uid, where, context)
-        return self.browse(cr, uid, ids, context), False
-
-    """
-        PRE:
-            uri: of the form "Sales Order/SO001"
-        PORT:
-            uri
-            object: the object.directory or object.directory.content
-            object2: the other object linked (if object.directory.content)
-    """
-    def get_object(self, cr, uid, uri, context={}):
-        lang = context.get('lang',False)
-        if not lang:
-            user = self.pool.get('res.users').browse(cr, uid, uid)
-            lang = user.context_lang 
-        context['lang'] = lang
-        if not uri:
-            return node_class(cr, uid, '', False, context=context, type='database')
-        turi = tuple(uri)
-        if False and (turi in self._cache):
-            (path, oo, oo2, context, content,type,root) = self._cache[turi]
-            if oo:
-                object = self.pool.get(oo[0]).browse(cr, uid, oo[1], context)
-            else:
-                object = False
-            if oo2:
-                object2 = self.pool.get(oo2[0]).browse(cr, uid, oo2[1], context)
-            else:
-                object2 = False
-            node = node_class(cr, uid, '/', False, context=context, type='database')
-            return node
-
-        node = node_class(cr, uid, '/', False, context=context, type='database')
-        for path in uri[:]:
-            if path:
-                node = node.child(path)
-                if not node:
-                    return False
-        oo = node.object and (node.object._name, node.object.id) or False
-        oo2 = node.object2 and (node.object2._name, node.object2.id) or False
-        self._cache[turi] = (node.path, oo, oo2, node.context, node.content,node.type,node.root)
-        return node
-
-    def get_childs(self, cr, uid, uri, context={}):
-        node = self.get_object(cr, uid, uri, context)
-        if uri:
-            children = node.children()
-        else:
-            children= [node]
-        result = map(lambda node: node.path_get(), children)
-        #childs,object2 = self._get_childs(cr, uid, object, False, context)
-        #result = map(lambda x: urlparse.urljoin(path+'/',x.name), childs)
-        return result
+    def _attach_parent_id(self, cr, uid, ids=None, context=None):
+        """Migrate ir.attachments to the document module.
+
+        When the 'document' module is loaded on a db that has had plain attachments,
+        they will need to be attached to some parent folder, and be converted from
+        base64-in-bytea to raw-in-bytea format.
+        This function performs the internal migration, once and forever, for these
+        attachments. It cannot be done through the nominal ORM maintenance code,
+        because the root folder is only created after the document_data.xml file
+        is loaded.
+        It also establishes the parent_id NOT NULL constraint that ir.attachment
+        should have had (but would have failed if plain attachments contained null
+        values).
+        It also updates the  File Size for the previously created attachments.
+        """
+
+        parent_id = self.pool.get('document.directory')._get_root_directory(cr,uid)
+        if not parent_id:
+            logging.getLogger('document').warning("_attach_parent_id(): still unable to set the parent folder!")
+            return False
 
-    def copy(self, cr, uid, id, default=None, context=None):
-        if not default:
-            default ={}
-        name = self.read(cr, uid, [id])[0]['name']
-        default.update({'name': name+ " (copy)"})
-        return super(document_directory,self).copy(cr,uid,id,default,context)
-
-    def _check_duplication(self, cr, uid,vals,ids=[],op='create'):
-        name=vals.get('name',False)
-        parent_id=vals.get('parent_id',False)
-        ressource_parent_type_id=vals.get('ressource_parent_type_id',False)
-        ressource_id=vals.get('ressource_id',0)
-        if op=='write':
-            for directory in self.browse(cr,uid,ids):
-                if not name:
-                    name=directory.name
-                if not parent_id:
-                    parent_id=directory.parent_id and directory.parent_id.id or False
-                if not ressource_parent_type_id:
-                    ressource_parent_type_id=directory.ressource_parent_type_id and directory.ressource_parent_type_id.id or False
-                if not ressource_id:
-                    ressource_id=directory.ressource_id and directory.ressource_id or 0
-                res=self.search(cr,uid,[('id','<>',directory.id),('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
-                if len(res):
-                    return False
-        if op=='create':
-            res=self.search(cr,uid,[('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
-            if len(res):
-                return False
-        return True
-    def write(self, cr, uid, ids, vals, context=None):
-        if not self._check_duplication(cr,uid,vals,ids,op='write'):
-            raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
-        return super(document_directory,self).write(cr,uid,ids,vals,context=context)
+        if ids is not None:
+            raise NotImplementedError("The ids argument exists only for API consistency; passing it is not supported yet.")
 
-    def create(self, cr, uid, vals, context=None):
-        if not self._check_duplication(cr,uid,vals):
-            raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
-        if vals.get('name',False) and (vals.get('name').find('/')+1 or vals.get('name').find('@')+1 or vals.get('name').find('$')+1 or vals.get('name').find('#')+1) :
-            raise osv.except_osv(_('ValidateError'), _('Directory name contains special characters!'))
-        return super(document_directory,self).create(cr, uid, vals, context)
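+        # Legacy rows hold base64 text inside the bytea column:
+        # encode(db_datas, 'escape') recovers that text, and
+        # decode(..., 'base64') turns it back into the raw bytes.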
+        cr.execute("UPDATE ir_attachment " \
+                    "SET parent_id = %s, db_datas = decode(encode(db_datas,'escape'), 'base64') " \
+                    "WHERE parent_id IS NULL", (parent_id,))
 
-document_directory()
+        cr.execute("UPDATE ir_attachment SET file_size=length(db_datas) WHERE file_size = 0;")
 
-class document_directory_node(osv.osv):
-    _inherit = 'process.node'
-    _columns = {
-        'directory_id':  fields.many2one('document.directory', 'Document directory', ondelete="set null"),
-    }
-document_directory_node()
+        cr.execute("ALTER TABLE ir_attachment ALTER parent_id SET NOT NULL")
 
-class document_directory_content_type(osv.osv):
-    _name = 'document.directory.content.type'
-    _description = 'Directory Content Type'
-    _columns = {
-        'name': fields.char('Content Type', size=64, required=True),
-        'code': fields.char('Extension', size=4),
-        'active': fields.boolean('Active'),
-    }
-    _defaults = {
-        'active': lambda *args: 1
-    }
-document_directory_content_type()
-
-class document_directory_content(osv.osv):
-    _name = 'document.directory.content'
-    _description = 'Directory Content'
-    _order = "sequence"
-    def _extension_get(self, cr, uid, context={}):
-        cr.execute('select code,name from document_directory_content_type where active')
-        res = cr.fetchall()
-        return res
-    _columns = {
-        'name': fields.char('Content Name', size=64, required=True),
-        'sequence': fields.integer('Sequence', size=16),
-        'suffix': fields.char('Suffix', size=16),
-        'report_id': fields.many2one('ir.actions.report.xml', 'Report'),
-        'extension': fields.selection(_extension_get, 'Document Type', required=True, size=4),
-        'include_name': fields.boolean('Include Record Name', help="Check this field if you want that the name of the file start by the record name."),
-        'directory_id': fields.many2one('document.directory', 'Directory'),
-    }
-    _defaults = {
-        'extension': lambda *args: '.pdf',
-        'sequence': lambda *args: 1,
-        'include_name': lambda *args: 1,
-    }
-    def process_write_pdf(self, cr, uid, node, context={}):
         return True
-    def process_read_pdf(self, cr, uid, node, context={}):
-        report = self.pool.get('ir.actions.report.xml').browse(cr, uid, node.content.report_id.id)
-        srv = netsvc.LocalService('report.'+report.report_name)
-        pdf,pdftype = srv.create(cr, uid, [node.object.id], {}, {})
-        s = StringIO.StringIO(pdf)
-        s.name = node
-        return s
-document_directory_content()
-
-class ir_action_report_xml(osv.osv):
-    _name="ir.actions.report.xml"
-    _inherit ="ir.actions.report.xml"
-
-    def _model_get(self, cr, uid, ids, name, arg, context):
-        res = {}
-        model_pool = self.pool.get('ir.model')
-        for data in self.read(cr,uid,ids,['model']):
-            model = data.get('model',False)
-            if model:
-                model_id =model_pool.search(cr,uid,[('model','=',model)])
-                if model_id:
-                    res[data.get('id')] = model_id[0]
-                else:
-                    res[data.get('id')] = False
-        return res
 
-    def _model_search(self, cr, uid, obj, name, args):
-        if not len(args):
-            return []
-        model_id= args[0][2]
-        if not model_id:
-            return []
-        model = self.pool.get('ir.model').read(cr,uid,[model_id])[0]['model']
-        report_id = self.search(cr,uid,[('model','=',model)])
-        if not report_id:
-            return [('id','=','0')]
-        return [('id','in',report_id)]
-
-    _columns={
-        'model_id' : fields.function(_model_get,fnct_search=_model_search,method=True,string='Model Id'),
-    }
-
-ir_action_report_xml()
-
-def create_directory(path):
-    dir_name = random_name()
-    path = os.path.join(path,dir_name)
-    os.makedirs(path)
-    return dir_name
-
-class document_file(osv.osv):
-    _inherit = 'ir.attachment'
-    _rec_name = 'datas_fname'
     def _get_filestore(self, cr):
-        return os.path.join(tools.config['root_path'], 'filestore', cr.dbname)
-
-    def _data_get(self, cr, uid, ids, name, arg, context):
+        return os.path.join(DMS_ROOT_PATH, cr.dbname)
+
+    def _data_get(self, cr, uid, ids, name, arg, context=None):
+        if context is None:
+            context = {}
+        fbrl = self.browse(cr, uid, ids, context=context)
+        nctx = nodes.get_node_context(cr, uid, context={})
+        # nctx will /not/ inherit the caller's context. Most of
+        # it would be useless, anyway (like active_id, active_model,
+        # bin_size etc.)
         result = {}
-        cr.execute('select id,store_fname,link from ir_attachment where id in ('+','.join(map(str,ids))+')')
-        for id,r,l in cr.fetchall():
-            try:
-                value = file(os.path.join(self._get_filestore(cr), r), 'rb').read()
-                result[id] = base64.encodestring(value)
-            except:
-                result[id]=''
-
-            if context.get('bin_size', False):
-                result[id] = tools.human_size(len(result[id]))
+        bin_size = context.get('bin_size', False)
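+        # With 'bin_size' in the context, only the length of the data is
+        # reported, instead of fetching and encoding the whole payload.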
+        for fbro in fbrl:
+            fnode = nodes.node_file(None, None, nctx, fbro)
+            if not bin_size:
+                data = fnode.get_data(cr, fbro)
+                result[fbro.id] = base64.encodestring(data or '')
+            else:
+                result[fbro.id] = fnode.get_data_len(cr, fbro)
 
         return result
 
     #
     # This code can be improved
     #
-    def _data_set(self, cr, obj, id, name, value, uid=None, context={}):
+    def _data_set(self, cr, uid, id, name, value, arg, context=None):
         if not value:
             return True
-        #if (not context) or context.get('store_method','fs')=='fs':
-        try:
-            path = self._get_filestore(cr)
-            if not os.path.isdir(path):
-                try:
-                    os.makedirs(path)
-                except:
-                    raise except_orm(_('Permission Denied !'), _('You do not permissions to write on the server side.'))
-
-            flag = None
-            # This can be improved
-            for dirs in os.listdir(path):
-                if os.path.isdir(os.path.join(path,dirs)) and len(os.listdir(os.path.join(path,dirs)))<4000:
-                    flag = dirs
-                    break
-            flag = flag or create_directory(path)
-            filename = random_name()
-            fname = os.path.join(path, flag, filename)
-            fp = file(fname,'wb')
-            v = base64.decodestring(value)
-            fp.write(v)
-            filesize = os.stat(fname).st_size
-            cr.execute('update ir_attachment set store_fname=%s,store_method=%s,file_size=%s where id=%s', (os.path.join(flag,filename),'fs',len(v),id))
-            return True
-        except Exception,e :
-            raise except_orm(_('Error!'), str(e))
+        fbro = self.browse(cr, uid, id, context=context)
+        nctx = nodes.get_node_context(cr, uid, context={})
+        fnode = nodes.node_file(None, None, nctx, fbro)
+        res = fnode.set_data(cr, base64.decodestring(value), fbro)
+        return res
 
     _columns = {
-        'user_id': fields.many2one('res.users', 'Owner', select=1),
-        'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'),
-        'parent_id': fields.many2one('document.directory', 'Directory', select=1),
-        'file_size': fields.integer('File Size', required=True),
-        'file_type': fields.char('Content Type', size=32),
-        'index_content': fields.text('Indexed Content'),
-        'write_date': fields.datetime('Date Modified', readonly=True),
-        'write_uid':  fields.many2one('res.users', 'Last Modification User', readonly=True),
+        # Columns from ir.attachment:
         'create_date': fields.datetime('Date Created', readonly=True),
         'create_uid':  fields.many2one('res.users', 'Creator', readonly=True),
-        'store_method': fields.selection([('db','Database'),('fs','Filesystem'),('link','Link')], "Storing Method"),
-        'datas': fields.function(_data_get,method=True,fnct_inv=_data_set,string='File Content',type="binary"),
-        'store_fname': fields.char('Stored Filename', size=200),
-        'res_model': fields.char('Attached Model', size=64), #res_model
-        'res_id': fields.integer('Attached ID'), #res_id
+        'write_date': fields.datetime('Date Modified', readonly=True),
+        'write_uid':  fields.many2one('res.users', 'Last Modification User', readonly=True),
+        'res_model': fields.char('Attached Model', size=64, readonly=True, change_default=True),
+        'res_id': fields.integer('Attached ID', readonly=True),
+
+        # If ir.attachment contained any data before the document module was
+        # installed, preserve it: don't drop the column!
+        'db_datas': fields.binary('Data', oldname='datas'),
+        'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
+
+        # Fields of document:
+        'user_id': fields.many2one('res.users', 'Owner', select=1),
+        # 'group_ids': fields.many2many('res.groups', 'document_group_rel', 'item_id', 'group_id', 'Groups'),
+        # The directory id is now mandatory. It can still be computed automatically.
+        'parent_id': fields.many2one('document.directory', 'Directory', select=1, required=True, change_default=True),
+        'index_content': fields.text('Indexed Content'),
         'partner_id':fields.many2one('res.partner', 'Partner', select=1),
-        'title': fields.char('Resource Title',size=64),
+        'file_size': fields.integer('File Size', required=True),
+        'file_type': fields.char('Content Type', size=128),
+
+        # fields used for file storage
+        'store_fname': fields.char('Stored Filename', size=200),
     }
+    _order = "create_date desc"
+
+    def __get_def_directory(self, cr, uid, context=None):
+        dirobj = self.pool.get('document.directory')
+        return dirobj._get_root_directory(cr, uid, context)
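+    # The root directory is created when document_data.xml is loaded (cf.
+    # _attach_parent_id above), so a default parent is always available once
+    # the module is fully installed.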
 
     _defaults = {
-        'user_id': lambda self,cr,uid,ctx:uid,
-        'file_size': lambda self,cr,uid,ctx:0,
-        'store_method': lambda *args: 'db'
+        'user_id': lambda self, cr, uid, ctx:uid,
+        'file_size': lambda self, cr, uid, ctx:0,
+        'parent_id': __get_def_directory
     }
     _sql_constraints = [
-        ('filename_uniq', 'unique (name,parent_id,res_id,res_model)', 'The file name must be unique !')
+        # filename_uniq is not possible in pure SQL
     ]
-    def _check_duplication(self, cr, uid,vals,ids=[],op='create'):
-        name=vals.get('name',False)
-        parent_id=vals.get('parent_id',False)
-        res_model=vals.get('res_model',False)
-        res_id=vals.get('res_id',0)
-        if op=='write':
-            for file in self.browse(cr,uid,ids):
+    def _check_duplication(self, cr, uid, vals, ids=[], op='create'):
+        name = vals.get('name', False)
+        parent_id = vals.get('parent_id', False)
+        res_model = vals.get('res_model', False)
+        res_id = vals.get('res_id', 0)
+        if op == 'write':
+            for file in self.browse(cr, uid, ids): # FIXME fields_only
                 if not name:
-                    name=file.name
+                    name = file.name
                 if not parent_id:
-                    parent_id=file.parent_id and file.parent_id.id or False
+                    parent_id = file.parent_id and file.parent_id.id or False
                 if not res_model:
-                    res_model=file.res_model and file.res_model or False
+                    res_model = file.res_model and file.res_model or False
                 if not res_id:
-                    res_id=file.res_id and file.res_id or 0
-                res=self.search(cr,uid,[('id','<>',file.id),('name','=',name),('parent_id','=',parent_id),('res_model','=',res_model),('res_id','=',res_id)])
+                    res_id = file.res_id and file.res_id or 0
+                res = self.search(cr, uid, [('id', '<>', file.id), ('name', '=', name), ('parent_id', '=', parent_id), ('res_model', '=', res_model), ('res_id', '=', res_id)])
                 if len(res):
                     return False
-        if op=='create':
-            res=self.search(cr,uid,[('name','=',name),('parent_id','=',parent_id),('res_id','=',res_id),('res_model','=',res_model)])
+        if op == 'create':
+            res = self.search(cr, uid, [('name', '=', name), ('parent_id', '=', parent_id), ('res_id', '=', res_id), ('res_model', '=', res_model)])
             if len(res):
                 return False
         return True
+
+    def check(self, cr, uid, ids, mode, context=None, values=None):
+        """Check access wrt. res_model, relax the rule of ir.attachment parent
+
+        With 'document' installed, everybody will have access to attachments of
+        any resources they can *read*.
+        """
+        return super(document_file, self).check(cr, uid, ids, mode='read',
+                                            context=context, values=values)
+
+    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
+        # Grab ids, bypassing 'count'
+        ids = super(document_file, self).search(cr, uid, args, offset=offset,
+                                                limit=limit, order=order,
+                                                context=context, count=False)
+        if not ids:
+            return 0 if count else []
+
+        # Filter out documents that are in directories that the user is not allowed to read.
+        # Must use pure SQL to avoid access rules exceptions (we want to remove the records,
+        # not fail), and the records have been filtered in parent's search() anyway.
+        cr.execute('SELECT id, parent_id from "%s" WHERE id in %%s' % self._table, (tuple(ids),))
+        doc_pairs = cr.fetchall()
+        parent_ids = set(zip(*doc_pairs)[1])
+        visible_parent_ids = self.pool.get('document.directory').search(cr, uid, [('id', 'in', list(parent_ids))])
+        disallowed_parents = parent_ids.difference(visible_parent_ids)
+        for doc_id, parent_id in doc_pairs:
+            if parent_id in disallowed_parents:
+                ids.remove(doc_id)
+        return len(ids) if count else ids
+
+
     def copy(self, cr, uid, id, default=None, context=None):
         if not default:
-            default ={}
-        name = self.read(cr, uid, [id])[0]['name']
-        default.update({'name': name+ " (copy)"})
-        return super(document_file,self).copy(cr,uid,id,default,context)
+            default = {}
+        if 'name' not in default:
+            name = self.read(cr, uid, [id], ['name'])[0]['name']
+            default.update({'name': name + " " + _("(copy)")})
+        return super(document_file, self).copy(cr, uid, id, default, context=context)
+
     def write(self, cr, uid, ids, vals, context=None):
-        res=self.search(cr,uid,[('id','in',ids)])
+        result = False
+        if not isinstance(ids, list):
+            ids = [ids]
+        res = self.search(cr, uid, [('id', 'in', ids)])
         if not len(res):
             return False
-        if not self._check_duplication(cr,uid,vals,ids,'write'):
-            raise except_orm(_('ValidateError'), _('File name must be unique!'))
-        result = super(document_file,self).write(cr,uid,ids,vals,context=context)
-        cr.commit()
-        try:
-            for f in self.browse(cr, uid, ids, context=context):
-                #if 'datas' not in vals:
-                #    vals['datas']=f.datas
-                res = content_index(base64.decodestring(vals['datas']), f.datas_fname, f.file_type or None)
-                super(document_file,self).write(cr, uid, ids, {
-                    'index_content': res
-                })
-            cr.commit()
-        except:
-            pass
+        if not self._check_duplication(cr, uid, vals, ids, 'write'):
+            raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
+
+        # if nodes call this write(), they must skip the code below
+        from_node = context and context.get('__from_node', False)
+        if (('parent_id' in vals) or ('name' in vals)) and not from_node:
+            # perhaps this file is renaming or changing directory
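+            # Delegate the move/rename to the node layer, so storage backends
+            # stay consistent with the db. move_to() may return a dict of
+            # extra values to write, True when the plain write can proceed,
+            # or False to skip the record entirely.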
+            nctx = nodes.get_node_context(cr,uid,context={})
+            dirobj = self.pool.get('document.directory')
+            if 'parent_id' in vals:
+                dbro = dirobj.browse(cr, uid, vals['parent_id'], context=context)
+                dnode = nctx.get_dir_node(cr, dbro)
+            else:
+                dbro = None
+                dnode = None
+            ids2 = []
+            for fbro in self.browse(cr, uid, ids, context=context):
+                if ('parent_id' not in vals or fbro.parent_id.id == vals['parent_id']) \
+                        and ('name' not in vals or fbro.name == vals['name']):
+                    ids2.append(fbro.id)
+                    continue
+                fnode = nctx.get_file_node(cr, fbro)
+                res = fnode.move_to(cr, dnode or fnode.parent, vals.get('name', fbro.name), fbro, dbro, True)
+                if isinstance(res, dict):
+                    vals2 = vals.copy()
+                    vals2.update(res)
+                    wid = res.get('id', fbro.id)
+                    result = super(document_file,self).write(cr,uid,wid,vals2,context=context)
+                    # TODO: how to handle/merge several results?
+                elif res == True:
+                    ids2.append(fbro.id)
+                elif res == False:
+                    pass
+            ids = ids2
+        if 'file_size' in vals: # this field is only ever written via direct SQL calls
+            del vals['file_size']
+        if len(ids) and len(vals):
+            result = super(document_file,self).write(cr, uid, ids, vals, context=context)
+        cr.commit() # ?
         return result
 
-    def create(self, cr, uid, vals, context={}):
-        vals['title']=vals['name']
-        vals['parent_id'] = context.get('parent_id',False) or vals.get('parent_id',False)
-        if not vals.get('res_id', False) and context.get('default_res_id',False):
-            vals['res_id']=context.get('default_res_id',False)
-        if not vals.get('res_model', False) and context.get('default_res_model',False):
-            vals['res_model']=context.get('default_res_model',False)
-        if vals.get('res_id', False) and vals.get('res_model',False):
-            obj_model=self.pool.get(vals['res_model'])
-            result = obj_model.read(cr, uid, [vals['res_id']], context=context)
-            if len(result):
-                obj=result[0]
-                if obj.get('name',False):
-                    vals['title'] = (obj.get('name',''))[:60]
-                if obj_model._name=='res.partner':
-                    vals['partner_id']=obj['id']
-                elif obj.get('address_id',False):
-                    if isinstance(obj['address_id'],tuple) or isinstance(obj['address_id'],list):
-                        address_id=obj['address_id'][0]
-                    else:
-                        address_id=obj['address_id']
-                    address=self.pool.get('res.partner.address').read(cr,uid,[address_id],context=context)
-                    if len(address):
-                        vals['partner_id']=address[0]['partner_id'][0] or False
-                elif obj.get('partner_id',False):
-                    if isinstance(obj['partner_id'],tuple) or isinstance(obj['partner_id'],list):
-                        vals['partner_id']=obj['partner_id'][0]
-                    else:
-                        vals['partner_id']=obj['partner_id']
-
-        datas=None
-        if vals.get('link',False) :
+    def create(self, cr, uid, vals, context=None):
+        if context is None:
+            context = {}
+        vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
+        if not vals['parent_id']:
+            vals['parent_id'] = self.pool.get('document.directory')._get_root_directory(cr,uid, context)
+        if not vals.get('res_id', False) and context.get('default_res_id', False):
+            vals['res_id'] = context.get('default_res_id', False)
+        if not vals.get('res_model', False) and context.get('default_res_model', False):
+            vals['res_model'] = context.get('default_res_model', False)
+        if vals.get('res_id', False) and vals.get('res_model', False) \
+                and not vals.get('partner_id', False):
+            vals['partner_id'] = self.__get_partner_id(cr, uid, \
+                vals['res_model'], vals['res_id'], context)
+
+        datas = None
+        if vals.get('link', False):
             import urllib
-            datas=base64.encodestring(urllib.urlopen(vals['link']).read())
+            datas = base64.encodestring(urllib.urlopen(vals['link']).read())
         else:
-            datas=vals.get('datas',False)
-        vals['file_size']= len(datas)
-        if not self._check_duplication(cr,uid,vals):
-            raise except_orm(_('ValidateError'), _('File name must be unique!'))
-        result = super(document_file,self).create(cr, uid, vals, context)
-        cr.commit()
-        try:
-            res = content_index(base64.decodestring(datas), vals['datas_fname'], vals.get('content_type', None))
-            super(document_file,self).write(cr, uid, [result], {
-                'index_content': res,
-            })
-            cr.commit()
-        except:
-            pass
-        return result
+            datas = vals.get('datas', False)
 
-    def unlink(self,cr, uid, ids, context={}):
-        for f in self.browse(cr, uid, ids, context):
-            #if f.store_method=='fs':
-            try:
-                os.unlink(os.path.join(self._get_filestore(cr), f.store_fname))
-            except:
-                pass
-        return super(document_file, self).unlink(cr, uid, ids, context)
-document_file()
+        if datas:
+            vals['file_size'] = len(datas)
+        else:
+            if vals.get('file_size'):
+                del vals['file_size']
+        result = self._check_duplication(cr, uid, vals)
+        if not result:
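+            # A file with the same name is already attached to this resource:
+            # overwrite its content instead of failing (note that this path
+            # expects 'datas' to be present in vals).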
+            domain = [
+                ('res_id', '=', vals['res_id']),
+                ('res_model', '=', vals['res_model']),
+                ('datas_fname', '=', vals['datas_fname']),
+            ]
+            attach_ids = self.search(cr, uid, domain, context=context)
+            super(document_file, self).write(cr, uid, attach_ids, 
+                                             {'datas' : vals['datas']},
+                                             context=context)
+            result = attach_ids[0]
+        else:
+            #raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
+            result = super(document_file, self).create(cr, uid, vals, context)
+            cr.commit() # ?
+        return result
 
-class document_configuration_wizard(osv.osv_memory):
-    _name='document.configuration.wizard'
-    _rec_name = 'Auto Directory configuration'
-    _columns = {
-        'host': fields.char('Server Address', size=64, help="Put here the server address or IP. " \
-            "Keep localhost if you don't know what to write.", required=True)
-    }
+    def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
+        """ A helper to retrieve the associated partner from any res_model+id
+            It is a hack that will try to discover if the mentioned record is
+            clearly associated with a partner record.
+        """
+        obj_model = self.pool.get(res_model)
+        if obj_model._name == 'res.partner':
+            return res_id
+        elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
+            bro = obj_model.browse(cr, uid, res_id, context=context)
+            return bro.partner_id.id
+        elif 'address_id' in obj_model._columns and obj_model._columns['address_id']._obj == 'res.partner.address':
+            bro = obj_model.browse(cr, uid, res_id, context=context)
+            return bro.address_id.partner_id.id
+        return False
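+    # Illustrative examples: a res.partner record maps to itself; a record
+    # with a partner_id many2one to res.partner (e.g. an invoice) maps to
+    # that partner; otherwise we fall back on address_id.partner_id, if any.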
 
-    def detect_ip_addr(self, cr, uid, context=None):
-        def _detect_ip_addr(self, cr, uid, context=None):
-            from array import array
-            import socket
-            from struct import pack, unpack
-
-            try:
-                import fcntl
-            except ImportError:
-                fcntl = None
-
-            if not fcntl: # not UNIX:
-                host = socket.gethostname()
-                ip_addr = socket.gethostbyname(host)
-            else: # UNIX:
-                # get all interfaces:
-                nbytes = 128 * 32
-                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-                names = array('B', '\0' * nbytes)
-                outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
-                namestr = names.tostring()
-                ifaces = [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)]
-
-                for ifname in [iface for iface in ifaces if iface != 'lo']:
-                    ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
+    def unlink(self, cr, uid, ids, context=None):
+        stor = self.pool.get('document.storage')
+        unres = []
+        # We have to do the unlink in 2 stages: prepare the list of actual
+        # files to be unlinked, update the db (safer to do first, since it
+        # can be rolled back) and only then unlink the files. The list would
+        # no longer be available once the records are discarded.
+        ids = self.search(cr, uid, [('id','in',ids)])
+        for f in self.browse(cr, uid, ids, context=context):
+            # TODO: update the node cache
+            par = f.parent_id
+            storage_id = None
+            while par:
+                if par.storage_id:
+                    storage_id = par.storage_id
                     break
-            return ip_addr
-
-        try:
-            ip_addr = _detect_ip_addr(self, cr, uid, context)
-        except:
-            ip_addr = 'localhost'
-        return ip_addr
+                par = par.parent_id
+            # assert storage_id, "Strange, found file #%s w/o storage!" % f.id
+            # TOCHECK: this assertion fails when the YML tests are run
+            if storage_id:
+                r = stor.prepare_unlink(cr, uid, storage_id, f)
+                if r:
+                    unres.append(r)
+            else:
+                logging.getLogger('document').warning("Unlinking attachment #%s %s that has no storage",
+                                                f.id, f.name)
+        res = super(document_file, self).unlink(cr, uid, ids, context)
+        stor.do_unlink(cr, uid, unres)
+        return res
 
-    _defaults = {
-        'host': detect_ip_addr,
-    }
+document_file()
 
-    def action_cancel(self,cr,uid,ids,conect=None):
-        return {
-                'view_type': 'form',
-                "view_mode": 'form',
-                'res_model': 'ir.actions.configuration.wizard',
-                'type': 'ir.actions.act_window',
-                'target':'new',
-         }
-
-    def action_config(self, cr, uid, ids, context=None):
-        conf = self.browse(cr, uid, ids[0], context)
-        obj=self.pool.get('document.directory')
-        objid=self.pool.get('ir.model.data')
-
-        if self.pool.get('sale.order'):
-            id = objid._get_id(cr, uid, 'document', 'dir_sale_order_all')
-            id = objid.browse(cr, uid, id, context=context).res_id
-            mid = self.pool.get('ir.model').search(cr, uid, [('model','=','sale.order')])
-            obj.write(cr, uid, [id], {
-                'type':'ressource',
-                'ressource_type_id': mid[0],
-                'domain': '[]',
-            })
-            aid = objid._get_id(cr, uid, 'sale', 'report_sale_order')
-            aid = objid.browse(cr, uid, aid, context=context).res_id
-
-            self.pool.get('document.directory.content').create(cr, uid, {
-                'name': "Print Order",
-                'suffix': "_print",
-                'report_id': aid,
-                'extension': '.pdf',
-                'include_name': 1,
-                'directory_id': id,
-            })
-            id = objid._get_id(cr, uid, 'document', 'dir_sale_order_quote')
-            id = objid.browse(cr, uid, id, context=context).res_id
-            obj.write(cr, uid, [id], {
-                'type':'ressource',
-                'ressource_type_id': mid[0],
-                'domain': "[('state','=','draft')]",
-            })
-
-        if self.pool.get('product.product'):
-            id = objid._get_id(cr, uid, 'document', 'dir_product')
-            id = objid.browse(cr, uid, id, context=context).res_id
-            mid = self.pool.get('ir.model').search(cr, uid, [('model','=','product.product')])
-            obj.write(cr, uid, [id], {
-                'type':'ressource',
-                'ressource_type_id': mid[0],
-            })
-
-        if self.pool.get('stock.location'):
-            aid = objid._get_id(cr, uid, 'stock', 'report_product_history')
-            aid = objid.browse(cr, uid, aid, context=context).res_id
-
-            self.pool.get('document.directory.content').create(cr, uid, {
-                'name': "Product Stock",
-                'suffix': "_stock_forecast",
-                'report_id': aid,
-                'extension': '.pdf',
-                'include_name': 1,
-                'directory_id': id,
-            })
-
-        if self.pool.get('account.analytic.account'):
-            id = objid._get_id(cr, uid, 'document', 'dir_project')
-            id = objid.browse(cr, uid, id, context=context).res_id
-            mid = self.pool.get('ir.model').search(cr, uid, [('model','=','account.analytic.account')])
-            obj.write(cr, uid, [id], {
-                'type':'ressource',
-                'ressource_type_id': mid[0],
-                'domain': '[]',
-                'ressource_tree': 1
-        })
-
-        aid = objid._get_id(cr, uid, 'document', 'action_document_browse')
-        aid = objid.browse(cr, uid, aid, context=context).res_id
-        self.pool.get('ir.actions.url').write(cr, uid, [aid], {'url': 'ftp://'+(conf.host or 'localhost')+':8021/'})
-
-        return {
-                'view_type': 'form',
-                "view_mode": 'form',
-                'res_model': 'ir.actions.configuration.wizard',
-                'type': 'ir.actions.act_window',
-                'target': 'new',
-        }
-document_configuration_wizard()