1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
23 from osv import osv, fields
26 # from psycopg2 import Binary
27 #from tools import config
29 from tools.translate import _
# Root of the Document Management System file store on disk: the
# 'document_path' config option, falling back to <root_path>/filestore.
# NOTE(review): relies on 'os' and 'tools' imports elided from this listing.
DMS_ROOT_PATH = tools.config.get('document_path', os.path.join(tools.config['root_path'], 'filestore'))
class document_file(osv.osv):
    """Extend ir.attachment so every attachment is a DMS document.

    Attachments gain a mandatory parent directory, owner, partner link,
    file size bookkeeping and storage through the document node layer.
    """
    _inherit = 'ir.attachment'
    # Display records by their stored file name rather than the 'name' field.
    _rec_name = 'datas_fname'
    def _attach_parent_id(self, cr, uid, ids=None, context=None):
        """Migrate plain ir.attachment records into the document module.

        When 'document' is installed on a database that already holds plain
        attachments, those records must be attached to some parent folder and
        converted from base64-in-bytea to raw-in-bytea format.  This performs
        that internal migration once and forever; it cannot go through the
        nominal ORM maintenance code because the root folder only exists
        after document_data.xml is loaded.  It also establishes the
        parent_id NOT NULL constraint ir.attachment should have had, and
        back-fills file_size for previously created attachments.
        """
        parent_id = self.pool.get('document.directory')._get_root_directory(cr,uid)
        # NOTE(review): a guard (presumably "if not parent_id:") is elided
        # from this listing; the warning below should only fire then.
        logging.getLogger('document').warning("at _attach_parent_id(), still not able to set the parent!")
        # NOTE(review): 'ids' is reserved by convention only; a guard around
        # this raise is elided from this listing.
        raise NotImplementedError("Ids is just there by convention! Don't use it yet, please.")
        # Attach orphan rows to the root and convert base64 payloads to raw bytes.
        cr.execute("UPDATE ir_attachment " \
                   "SET parent_id = %s, db_datas = decode(encode(db_datas,'escape'), 'base64') " \
                   "WHERE parent_id IS NULL", (parent_id,))
        # Enforce NOT NULL now that every row has a parent.
        cr.execute("ALTER TABLE ir_attachment ALTER parent_id SET NOT NULL")
        # Proceeding to update the file size of the corresponding attachments.
        cr.execute('SELECT id, db_datas FROM ir_attachment WHERE file_size=0 AND db_datas IS NOT NULL')
        old_attachments = cr.dictfetchall()
        for attachment in old_attachments:
            f_size = len(attachment['db_datas'])
            cr.execute('UPDATE ir_attachment SET file_size=%s WHERE id=%s',(f_size,attachment['id']))
79 def _get_filestore(self, cr):
80 return os.path.join(DMS_ROOT_PATH, cr.dbname)
    def _data_get(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter for 'datas'.

        Reads file content through the node layer and returns it
        base64-encoded per id; when context['bin_size'] is set only the
        content length is returned instead of the payload.
        """
        fbrl = self.browse(cr, uid, ids, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        # nctx will /not/ inherit the caller's context. Most of
        # it would be useless, anyway (like active_id, active_model,
        # bin_size and friends).
        bin_size = context.get('bin_size', False)
        # NOTE(review): the result-dict initialisation and the per-record
        # loop header are elided from this listing; 'fbro' below is one
        # browse record of 'fbrl'.
        fnode = nodes.node_file(None, None, nctx, fbro)
        data = fnode.get_data(cr, fbro)
        result[fbro.id] = base64.encodestring(data or '')
        # NOTE(review): the bin_size branch -- return length, not payload.
        result[fbro.id] = fnode.get_data_len(cr, fbro)
        # NOTE(review): the 'return result' line is elided from this listing.
    # This code can be improved
    def _data_set(self, cr, uid, id, name, value, arg, context=None):
        """Function-field setter for 'datas'.

        Decodes the base64 payload and stores it through the node layer
        (db column or on-disk store, depending on the directory's storage).
        """
        fbro = self.browse(cr, uid, id, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        fnode = nodes.node_file(None, None, nctx, fbro)
        res = fnode.set_data(cr, base64.decodestring(value), fbro)
        # NOTE(review): the return statement is elided from this listing.
    # NOTE(review): the "_columns = {" opener and its closing brace are
    # elided from this listing; the entries below belong to that dict.
        # Columns from ir.attachment:
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        'res_model': fields.char('Attached Model', size=64, readonly=True, change_default=True),
        'res_id': fields.integer('Attached ID', readonly=True),
        # If ir.attachment contained any data before document is installed, preserve
        # the data, don't drop the column!
        'db_datas': fields.binary('Data', oldname='datas'),
        # 'datas' is virtual: content is read/written through the node layer
        # (see _data_get/_data_set); nodrop preserves the legacy column.
        'datas': fields.function(_data_get, method=True, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
        # Fields of document:
        'user_id': fields.many2one('res.users', 'Owner', select=1),
        # 'group_ids': fields.many2many('res.groups', 'document_group_rel', 'item_id', 'group_id', 'Groups'),
        # the directory id now is mandatory. It can still be computed automatically.
        'parent_id': fields.many2one('document.directory', 'Directory', select=1, required=True, change_default=True),
        'index_content': fields.text('Indexed Content'),
        'partner_id':fields.many2one('res.partner', 'Partner', select=1),
        # file_size is maintained with direct SQL, not ORM writes (see write()).
        'file_size': fields.integer('File Size', required=True),
        'file_type': fields.char('Content Type', size=128),
        # fields used for file storage
        'store_fname': fields.char('Stored Filename', size=200),
    # Newest documents first.
    _order = "create_date desc"
143 def __get_def_directory(self, cr, uid, context=None):
144 dirobj = self.pool.get('document.directory')
145 return dirobj._get_root_directory(cr, uid, context)
    # NOTE(review): the "_defaults = {" opener and its closing brace are
    # elided from this listing; the entries below belong to that dict.
        # New documents: owned by their creator, zero size until data is
        # written, and filed under the DMS root directory by default.
        'user_id': lambda self, cr, uid, ctx:uid,
        'file_size': lambda self, cr, uid, ctx:0,
        'parent_id': __get_def_directory
    # filename_uniq is not possible in pure SQL # ??
    def _check_duplication(self, cr, uid, ids, context=None):
        """Constraint: no two attachments may share name + parent_id +
        res_model + res_id."""
        # FIXME can be a SQL constraint: unique(name,parent_id,res_model,res_id)
        for attach in self.browse(cr, uid, ids, context):
            domain = [('id', '!=', attach.id),
                      ('name', '=', attach.name),
                      ('parent_id', '=', attach.parent_id.id),
                      ('res_model', '=', attach.res_model),
                      ('res_id', '=', attach.res_id),
            # NOTE(review): the domain-list closer and the False/True return
            # statements are elided from this listing.
            if self.search(cr, uid, domain, context=context):
169 (_check_duplication, 'File name must be unique!', ['name', 'parent_id', 'res_model', 'res_id'])
172 def check(self, cr, uid, ids, mode, context=None, values=None):
173 """Check access wrt. res_model, relax the rule of ir.attachment parent
175 With 'document' installed, everybody will have access to attachments of
176 any resources they can *read*.
178 return super(document_file, self).check(cr, uid, ids, mode='read',
179 context=context, values=values)
    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Search attachments, hiding those whose directory the user cannot
        read; honours 'count' mode."""
        # Grab ids, bypassing 'count'
        ids = super(document_file, self).search(cr, uid, args, offset=offset,
                                                limit=limit, order=order,
                                                context=context, count=False)
        # NOTE(review): the empty-result guard ("if not ids:") is elided from
        # this listing; the early return below belongs to that guard.
        return 0 if count else []
        # Work with a set, as list.remove() is prohibitive for large lists of documents
        # (takes 20+ seconds on a db with 100k docs during search_count()!)
        # NOTE(review): the conversion of 'ids' to a set is elided here.
        # Filter out documents that are in directories that the user is not allowed to read.
        # Must use pure SQL to avoid access rules exceptions (we want to remove the records,
        # not fail), and the records have been filtered in parent's search() anyway.
        cr.execute('SELECT id, parent_id from "%s" WHERE id in %%s' % self._table, (tuple(ids),))
        doc_pairs = cr.fetchall()
        # Python 2 zip() returns a list; [1] is the column of parent ids.
        parent_ids = set(zip(*doc_pairs)[1])
        # Directories the caller may read, via the ORM (access rules apply).
        visible_parent_ids = self.pool.get('document.directory').search(cr, uid, [('id', 'in', list(parent_ids))])
        disallowed_parents = parent_ids.difference(visible_parent_ids)
        # NOTE(review): removal of each hidden doc_id from the id set is
        # elided from the loop body in this listing.
        for doc_id, parent_id in doc_pairs:
            if parent_id in disallowed_parents:
        return len(ids) if count else list(ids)
    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate an attachment, appending " (copy)" to the name unless a
        new name is supplied in 'default'."""
        # NOTE(review): initialisation of a missing 'default' dict is elided
        # from this listing.
        if 'name' not in default:
            name = self.read(cr, uid, [id], ['name'])[0]['name']
            default.update({'name': name + " " + _("(copy)")})
        return super(document_file, self).copy(cr, uid, id, default, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        """Write attachment values, routing renames/moves through the node
        layer so the underlying storage follows.

        file_size is stripped from vals: that field is maintained with
        direct SQL only (see create()/_attach_parent_id()).
        """
        if not isinstance(ids, list):
            # NOTE(review): wrapping of a scalar id into a list is elided here.
        res = self.search(cr, uid, [('id', 'in', ids)])
        # NOTE(review): guard / context-initialisation lines are elided here.
        # if nodes call this write(), they must skip the code below
        from_node = context and context.get('__from_node', False)
        if (('parent_id' in vals) or ('name' in vals)) and not from_node:
            # perhaps this file is renaming or changing directory
            nctx = nodes.get_node_context(cr,uid,context={})
            dirobj = self.pool.get('document.directory')
            if 'parent_id' in vals:
                # Resolve the destination directory node once for all records.
                dbro = dirobj.browse(cr, uid, vals['parent_id'], context=context)
                dnode = nctx.get_dir_node(cr, dbro)
            # NOTE(review): the else-branch (dbro = dnode = None) is elided
            # from this listing.
            for fbro in self.browse(cr, uid, ids, context=context):
                # Skip records whose directory and name are both unchanged.
                if ('parent_id' not in vals or fbro.parent_id.id == vals['parent_id']) \
                    and ('name' not in vals or fbro.name == vals['name']):
                    # NOTE(review): the 'continue' statement is elided here.
                fnode = nctx.get_file_node(cr, fbro)
                res = fnode.move_to(cr, dnode or fnode.parent, vals.get('name', fbro.name), fbro, dbro, True)
                if isinstance(res, dict):
                    # NOTE(review): handling of the dict result (building
                    # 'vals2') is partially elided from this listing.
                    wid = res.get('id', fbro.id)
                    result = super(document_file,self).write(cr,uid,wid,vals2,context=context)
                    # TODO: how to handle/merge several results?
        if 'file_size' in vals: # only write that field using direct SQL calls
            del vals['file_size']
        if len(ids) and len(vals):
            result = super(document_file,self).write(cr, uid, ids, vals, context=context)
        # NOTE(review): the 'return result' line is elided from this listing.
    def create(self, cr, uid, vals, context=None):
        """Create an attachment, defaulting parent directory, attached
        resource, partner and file_size from the context and payload."""
        # NOTE(review): context-initialisation lines are elided in this listing.
        # Directory: explicit context wins, then vals, then the DMS root.
        vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
        if not vals['parent_id']:
            vals['parent_id'] = self.pool.get('document.directory')._get_root_directory(cr,uid, context)
        if not vals.get('res_id', False) and context.get('default_res_id', False):
            vals['res_id'] = context.get('default_res_id', False)
        if not vals.get('res_model', False) and context.get('default_res_model', False):
            vals['res_model'] = context.get('default_res_model', False)
        if vals.get('res_id', False) and vals.get('res_model', False) \
                and not vals.get('partner_id', False):
            # Best-effort partner discovery from the attached record.
            vals['partner_id'] = self.__get_partner_id(cr, uid, \
                vals['res_model'], vals['res_id'], context)
        if vals.get('link', False) :
            # NOTE(review): surrounding lines elided; linked content is
            # downloaded and base64-encoded to size it.
            datas = base64.encodestring(urllib.urlopen(vals['link']).read())
        # NOTE(review): the else-branch header is elided from this listing.
            datas = vals.get('datas', False)
        vals['file_size'] = len(datas)
        # NOTE(review): a guard distinguishing the no-data case is elided;
        # without data, a caller-supplied file_size is dropped instead.
        if vals.get('file_size'):
            del vals['file_size']
        result = super(document_file, self).create(cr, uid, vals, context)
        # NOTE(review): the 'return result' line is elided from this listing.
    def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
        """A helper to retrieve the associated partner from any res_model+id.

        It is a hack that will try to discover if the mentioned record is
        clearly associated with a partner record: the record itself, its
        'partner_id' field, or the partner behind its 'address_id'.
        """
        obj_model = self.pool.get(res_model)
        if obj_model._name == 'res.partner':
            # NOTE(review): "return res_id" is elided from this listing.
        elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
            bro = obj_model.browse(cr, uid, res_id, context=context)
            return bro.partner_id.id
        elif 'address_id' in obj_model._columns and obj_model._columns['address_id']._obj == 'res.partner.address':
            bro = obj_model.browse(cr, uid, res_id, context=context)
            return bro.address_id.partner_id.id
        # NOTE(review): the fallback return is elided from this listing.
    def unlink(self, cr, uid, ids, context=None):
        """Delete attachments and remove their stored files afterwards."""
        stor = self.pool.get('document.storage')
        # We have to do the unlink in 2 stages: prepare a list of actual
        # files to be unlinked, update the db (safer to do first, can be
        # rolled back) and then unlink the files. The list wouldn't exist
        # after we discard the objects
        ids = self.search(cr, uid, [('id','in',ids)])
        for f in self.browse(cr, uid, ids, context=context):
            # TODO: update the node cache
            # NOTE(review): retrieval of the parent directory ('par') and the
            # accumulator for prepared unlinks ('unres') are elided from this
            # listing.
            storage_id = par.storage_id
            #assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail
            # NOTE(review): the conditional selecting prepare vs warn is
            # elided; the warning call below is also truncated mid-arguments.
            r = stor.prepare_unlink(cr, uid, storage_id, f)
            logging.getLogger('document').warning("Unlinking attachment #%s %s that has no storage",
        # Database delete first (can be rolled back), then physical files.
        res = super(document_file, self).unlink(cr, uid, ids, context)
        stor.do_unlink(cr, uid, unres)
        # NOTE(review): the 'return res' line is elided from this listing.