class document_file(osv.osv):
_inherit = 'ir.attachment'
_rec_name = 'datas_fname'
+
+    def _attach_parent_id(self, cr, uid, ids=None, context=None):
+        """Migrate ir.attachments to the document module.
+
+        When the 'document' module is loaded on a db that has had plain attachments,
+        they will need to be attached to some parent folder, and be converted from
+        base64-in-bytea to raw-in-bytea format.
+        This function performs the internal migration, once and forever, for these
+        attachments. It cannot be done through the nominal ORM maintenance code,
+        because the root folder is only created after the document_data.xml file
+        is loaded.
+        It also establishes the parent_id NOT NULL constraint that ir.attachment
+        should have had (but would have failed if plain attachments contained null
+        values).
+        It also updates the File Size for the previously created attachments.
+
+        :param ids: must be left to None; per-record migration is not implemented
+        :return: False when the root directory does not exist yet, True on success
+        """
+
+        # The root folder only exists once document_data.xml has been loaded;
+        # until then we cannot re-parent anything, so bail out (and warn).
+        parent_id = self.pool.get('document.directory')._get_root_directory(cr,uid)
+        if not parent_id:
+            logging.getLogger('document').warning("at _attach_parent_id(), still not able to set the parent!")
+            return False
+
+        if ids is not None:
+            raise NotImplementedError("Ids is just there by convention! Don't use it yet, please.")
+
+        # Re-parent every orphan attachment under the root folder and convert
+        # its payload from base64 text to raw bytea, all in one SQL statement.
+        cr.execute("UPDATE ir_attachment " \
+                   "SET parent_id = %s, db_datas = decode(encode(db_datas,'escape'), 'base64') " \
+                   "WHERE parent_id IS NULL", (parent_id,))
+
+        # Safe now: the UPDATE above left no NULL parent_id rows behind.
+        cr.execute("ALTER TABLE ir_attachment ALTER parent_id SET NOT NULL")
+
+        # Proceeding to update the filesize of the corresponding attachments
+        cr.execute('SELECT id, db_datas FROM ir_attachment WHERE file_size=0 AND db_datas IS NOT NULL')
+        old_attachments = cr.dictfetchall()
+
+        for attachment in old_attachments:
+            # file_size is the byte length of the (now raw) stored payload.
+            f_size = len(attachment['db_datas'])
+            cr.execute('UPDATE ir_attachment SET file_size=%s WHERE id=%s',(f_size,attachment['id']))
+
+        return True
+
+    # Per-database filestore location: <DMS_ROOT_PATH>/<dbname>.
     def _get_filestore(self, cr):
         return os.path.join(DMS_ROOT_PATH, cr.dbname)
- def _data_get(self, cr, uid, ids, name, arg, context):
+ def _data_get(self, cr, uid, ids, name, arg, context=None):
+ if context is None:
+ context = {}
fbrl = self.browse(cr, uid, ids, context=context)
nctx = nodes.get_node_context(cr, uid, context={})
# nctx will /not/ inherit the caller's context. Most of
#
# This code can be improved
#
- def _data_set(self, cr, uid, id, name, value, arg, context):
+ def _data_set(self, cr, uid, id, name, value, arg, context=None):
if not value:
return True
fbro = self.browse(cr, uid, id, context=context)
'parent_id': __get_def_directory
}
     _sql_constraints = [
-        # filename_uniq is not possible in pure SQL
+        # filename_uniq is not possible in pure SQL; uniqueness of
+        # (name, parent_id, res_model, res_id) is enforced by the
+        # _check_duplication Python constraint instead.
+        # TODO(review): confirm why a plain UNIQUE index was ruled out here.
     ]
- def _check_duplication(self, cr, uid, vals, ids=[], op='create'):
- name = vals.get('name', False)
- parent_id = vals.get('parent_id', False)
- res_model = vals.get('res_model', False)
- res_id = vals.get('res_id', 0)
- if op == 'write':
- for file in self.browse(cr, uid, ids): # FIXME fields_only
- if not name:
- name = file.name
- if not parent_id:
- parent_id = file.parent_id and file.parent_id.id or False
- if not res_model:
- res_model = file.res_model and file.res_model or False
- if not res_id:
- res_id = file.res_id and file.res_id or 0
- res = self.search(cr, uid, [('id', '<>', file.id), ('name', '=', name), ('parent_id', '=', parent_id), ('res_model', '=', res_model), ('res_id', '=', res_id)])
- if len(res):
- return False
- if op == 'create':
- res = self.search(cr, uid, [('name', '=', name), ('parent_id', '=', parent_id), ('res_id', '=', res_id), ('res_model', '=', res_model)])
- if len(res):
+    def _check_duplication(self, cr, uid, ids, context=None):
+        # FIXME can be a SQL constraint: unique(name,parent_id,res_model,res_id)
+        # Constraint body: fails (returns False) as soon as any record in `ids`
+        # has a sibling attachment with the same name, folder and attached
+        # resource (res_model/res_id pair).
+        for attach in self.browse(cr, uid, ids, context):
+            domain = [('id', '!=', attach.id),
+                      ('name', '=', attach.name),
+                      ('parent_id', '=', attach.parent_id.id),
+                      ('res_model', '=', attach.res_model),
+                      ('res_id', '=', attach.res_id),
+                      ]
+            if self.search(cr, uid, domain, context=context):
                 return False
         return True
+    _constraints = [
+        (_check_duplication, 'File name must be unique!', ['name', 'parent_id', 'res_model', 'res_id'])
+    ]
+
+    def check(self, cr, uid, ids, mode, context=None, values=None):
+        """Check access wrt. res_model, relax the rule of ir.attachment parent
+
+        With 'document' installed, everybody will have access to attachments of
+        any resources they can *read*.
+        """
+        # Deliberately downgrade whatever mode was requested (write, unlink, ...)
+        # to 'read' before delegating to ir.attachment's own check().
+        return super(document_file, self).check(cr, uid, ids, mode='read',
+                                                context=context, values=values)
+    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
+        # Grab ids, bypassing 'count' so we can post-filter before counting.
+        ids = super(document_file, self).search(cr, uid, args, offset=offset,
+                                                limit=limit, order=order,
+                                                context=context, count=False)
+        if not ids:
+            return 0 if count else []
+
+        # Filter out documents that are in directories that the user is not allowed to read.
+        # Must use pure SQL to avoid access rules exceptions (we want to remove the records,
+        # not fail), and the records have been filtered in parent's search() anyway.
+        cr.execute('SELECT id, parent_id from "%s" WHERE id in %%s' % self._table, (tuple(ids),))
+        doc_pairs = cr.fetchall()
+        # NOTE(review): zip(*doc_pairs)[1] is Python-2 only (zip() returns an
+        # iterator on Python 3); fine for the py2 OpenERP server this targets.
+        parent_ids = set(zip(*doc_pairs)[1])
+        visible_parent_ids = self.pool.get('document.directory').search(cr, uid, [('id', 'in', list(parent_ids))])
+        disallowed_parents = parent_ids.difference(visible_parent_ids)
+        # Remove (rather than rebuild) so the parent's ordering is preserved.
+        for doc_id, parent_id in doc_pairs:
+            if parent_id in disallowed_parents:
+                ids.remove(doc_id)
+        return len(ids) if count else ids
+
+
     def copy(self, cr, uid, id, default=None, context=None):
+        # Duplicate an attachment; unless the caller provided a name in
+        # `default`, suffix the translated "(copy)" marker to the original one.
         if not default:
             default = {}
         if 'name' not in default:
-            name = self.read(cr, uid, [id])[0]['name']
-            default.update({'name': name + " (copy)"})
-            return super(document_file, self).copy(cr, uid, id, default, context)
+            # Read only the 'name' column instead of the whole record.
+            name = self.read(cr, uid, [id], ['name'])[0]['name']
+            default.update({'name': name + " " + _("(copy)")})
+        return super(document_file, self).copy(cr, uid, id, default, context=context)
def write(self, cr, uid, ids, vals, context=None):
result = False
res = self.search(cr, uid, [('id', 'in', ids)])
if not len(res):
return False
- if not self._check_duplication(cr, uid, vals, ids, 'write'):
- raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
# if nodes call this write(), they must skip the code below
from_node = context and context.get('__from_node', False)
ids2 = []
for fbro in self.browse(cr, uid, ids, context=context):
if ('parent_id' not in vals or fbro.parent_id.id == vals['parent_id']) \
- and ('name' not in vals or fbro.name == vals['name']) :
+ and ('name' not in vals or fbro.name == vals['name']):
ids2.append(fbro.id)
continue
fnode = nctx.get_file_node(cr, fbro)
del vals['file_size']
if len(ids) and len(vals):
result = super(document_file,self).write(cr, uid, ids, vals, context=context)
- cr.commit() # ?
return result
def create(self, cr, uid, vals, context=None):
else:
if vals.get('file_size'):
del vals['file_size']
- if not self._check_duplication(cr, uid, vals):
- raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
result = super(document_file, self).create(cr, uid, vals, context)
- cr.commit() # ?
return result
- def __get_partner_id(self, cr, uid, res_model, res_id, context):
+ def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
""" A helper to retrieve the associated partner from any res_model+id
It is a hack that will try to discover if the mentioned record is
clearly associated with a partner record.
# rolled back) and then unlink the files. The list wouldn't exist
# after we discard the objects
ids = self.search(cr, uid, [('id','in',ids)])
- for f in self.browse(cr, uid, ids, context):
+ for f in self.browse(cr, uid, ids, context=context):
# TODO: update the node cache
par = f.parent_id
storage_id = None