# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import logging
import os
import urllib

from osv import osv, fields
# from psycopg2 import Binary
#from tools import config
import tools
from tools.translate import _

import nodes

DMS_ROOT_PATH = tools.config.get('document_path', os.path.join(tools.config['root_path'], 'filestore'))
class document_file(osv.osv):
    _inherit = 'ir.attachment'
    _rec_name = 'datas_fname'
    def _attach_parent_id(self, cr, uid, ids=None, context=None):
        """Migrate ir.attachments to the document module.

        When the 'document' module is loaded on a db that has had plain attachments,
        they will need to be attached to some parent folder, and be converted from
        base64-in-bytea to raw-in-bytea format.
        This function performs the internal migration, once and forever, for these
        attachments. It cannot be done through the nominal ORM maintenance code,
        because the root folder is only created after the document_data.xml file
        is loaded.
        It also establishes the parent_id NOT NULL constraint that ir.attachment
        should have had (but would have failed if plain attachments contained null
        values).
        It also updates the File Size for the previously created attachments.
        """
        parent_id = self.pool.get('document.directory')._get_root_directory(cr, uid)
        if not parent_id:
            logging.getLogger('document').warning("at _attach_parent_id(), still not able to set the parent!")
            return False

        if ids is not None:
            raise NotImplementedError("Ids is just there by convention! Don't use it yet, please.")

        # Attach all orphan attachments to the root folder and convert their
        # payload from base64-in-bytea to raw-in-bytea.
        cr.execute("UPDATE ir_attachment " \
                   "SET parent_id = %s, db_datas = decode(encode(db_datas,'escape'), 'base64') " \
                   "WHERE parent_id IS NULL", (parent_id,))

        cr.execute("UPDATE ir_attachment SET file_size=length(db_datas) WHERE file_size = 0 and type = 'binary'")

        cr.execute("ALTER TABLE ir_attachment ALTER parent_id SET NOT NULL")

        return True
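    # A minimal verification sketch (illustrative only, not part of the original
    # module): after this migration runs, no orphan attachments should remain, e.g.
    #   SELECT count(*) FROM ir_attachment WHERE parent_id IS NULL;   -- expected: 0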
    def _get_filestore(self, cr):
        return os.path.join(DMS_ROOT_PATH, cr.dbname)
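    # Illustrative resolution (hypothetical config values), kept as a comment so it
    # never executes at class-definition time: with root_path = '/opt/openerp/server'
    # and no explicit 'document_path' option, DMS_ROOT_PATH is
    # '/opt/openerp/server/filestore' and _get_filestore(cr) returns
    # '/opt/openerp/server/filestore/<dbname>' for the current database.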
    def _data_get(self, cr, uid, ids, name, arg, context=None):
        if context is None:
            context = {}
        fbrl = self.browse(cr, uid, ids, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        # nctx will /not/ inherit the caller's context. Most of
        # it would be useless, anyway (like active_id, active_model,
        # bin_size etc.)
        result = {}
        bin_size = context.get('bin_size', False)
        for fbro in fbrl:
            fnode = nodes.node_file(None, None, nctx, fbro)
            if not bin_size:
                data = fnode.get_data(cr, fbro)
                result[fbro.id] = base64.encodestring(data or '')
            else:
                result[fbro.id] = fnode.get_data_len(cr, fbro)
        return result
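    # Illustrative caller-side use (hypothetical attachment id), a sketch assuming
    # the standard ORM read() path; the 'datas' function field defined below routes here:
    #   att = self.pool.get('ir.attachment').read(cr, uid, [att_id], ['datas'])[0]
    #   raw = base64.decodestring(att['datas'] or '')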
    # This code can be improved
    def _data_set(self, cr, uid, id, name, value, arg, context=None):
        if not value:
            return True
        fbro = self.browse(cr, uid, id, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        fnode = nodes.node_file(None, None, nctx, fbro)
        res = fnode.set_data(cr, base64.decodestring(value), fbro)
        return res
    _columns = {
        # Columns from ir.attachment:
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        'res_model': fields.char('Attached Model', size=64, readonly=True, change_default=True),
        'res_id': fields.integer('Attached ID', readonly=True),

        # If ir.attachment contained any data before document is installed, preserve
        # the data, don't drop the column!
        'db_datas': fields.binary('Data', oldname='datas'),
        'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),

        # Fields of document:
        'user_id': fields.many2one('res.users', 'Owner', select=1),
        # 'group_ids': fields.many2many('res.groups', 'document_group_rel', 'item_id', 'group_id', 'Groups'),
        # the directory id is now mandatory. It can still be computed automatically.
        'parent_id': fields.many2one('document.directory', 'Directory', select=1, required=True, change_default=True),
        'index_content': fields.text('Indexed Content'),
        'partner_id': fields.many2one('res.partner', 'Partner', select=1),
        'file_size': fields.integer('File Size', required=True),
        'file_type': fields.char('Content Type', size=128),

        # fields used for file storage
        'store_fname': fields.char('Stored Filename', size=200),
    }
    def __get_def_directory(self, cr, uid, context=None):
        dirobj = self.pool.get('document.directory')
        return dirobj._get_root_directory(cr, uid, context)

    _defaults = {
        'user_id': lambda self, cr, uid, ctx: uid,
        'file_size': lambda self, cr, uid, ctx: 0,
        'parent_id': __get_def_directory,
    }

    # filename_uniq is not possible in pure SQL
    def _check_duplication(self, cr, uid, vals, ids=[], op='create'):
        name = vals.get('name', False)
        parent_id = vals.get('parent_id', False)
        res_model = vals.get('res_model', False)
        res_id = vals.get('res_id', 0)
        if op == 'write':
            for file in self.browse(cr, uid, ids): # FIXME fields_only
                if not name:
                    name = file.name
                if not parent_id:
                    parent_id = file.parent_id and file.parent_id.id or False
                if not res_model:
                    res_model = file.res_model and file.res_model or False
                if not res_id:
                    res_id = file.res_id and file.res_id or 0
                res = self.search(cr, uid, [('id', '<>', file.id), ('name', '=', name), ('parent_id', '=', parent_id), ('res_model', '=', res_model), ('res_id', '=', res_id)])
                if len(res):
                    return False

        if op == 'create':
            res = self.search(cr, uid, [('name', '=', name), ('parent_id', '=', parent_id), ('res_id', '=', res_id), ('res_model', '=', res_model)])
            if len(res):
                return False
        return True
    def check(self, cr, uid, ids, mode, context=None, values=None):
        """Check access wrt. res_model, relax the rule of ir.attachment parent

        With 'document' installed, everybody will have access to attachments of
        any resources they can *read*.
        """
        return super(document_file, self).check(cr, uid, ids, mode='read',
                                                context=context, values=values)
    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        # Grab ids, bypassing 'count'
        ids = super(document_file, self).search(cr, uid, args, offset=offset,
                                                limit=limit, order=order,
                                                context=context, count=False)
        if not ids:
            return 0 if count else []

        # Filter out documents that are in directories that the user is not allowed to read.
        # Must use pure SQL to avoid access rules exceptions (we want to remove the records,
        # not fail), and the records have been filtered in parent's search() anyway.
        cr.execute('SELECT id, parent_id from "%s" WHERE id in %%s' % self._table, (tuple(ids),))
        doc_pairs = cr.fetchall()
        parent_ids = set(zip(*doc_pairs)[1])
        visible_parent_ids = self.pool.get('document.directory').search(cr, uid, [('id', 'in', list(parent_ids))])
        disallowed_parents = parent_ids.difference(visible_parent_ids)
        for doc_id, parent_id in doc_pairs:
            if parent_id in disallowed_parents:
                ids.remove(doc_id)
        return len(ids) if count else ids
    def copy(self, cr, uid, id, default=None, context=None):
        if not default:
            default = {}
        if 'name' not in default:
            name = self.read(cr, uid, [id], ['name'])[0]['name']
            default.update({'name': name + " " + _("(copy)")})
        return super(document_file, self).copy(cr, uid, id, default, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        result = False
        if not isinstance(ids, list):
            ids = [ids]
        res = self.search(cr, uid, [('id', 'in', ids)])
        if not len(res):
            return False
        if not self._check_duplication(cr, uid, vals, ids, 'write'):
            raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))

        # if nodes call this write(), they must skip the code below
        from_node = context and context.get('__from_node', False)
        if (('parent_id' in vals) or ('name' in vals)) and not from_node:
            # perhaps this file is renaming or changing directory
            nctx = nodes.get_node_context(cr, uid, context={})
            dirobj = self.pool.get('document.directory')
            if 'parent_id' in vals:
                dbro = dirobj.browse(cr, uid, vals['parent_id'], context=context)
                dnode = nctx.get_dir_node(cr, dbro)
            else:
                dbro = None
                dnode = None
            ids2 = []
            for fbro in self.browse(cr, uid, ids, context=context):
                if ('parent_id' not in vals or fbro.parent_id.id == vals['parent_id']) \
                    and ('name' not in vals or fbro.name == vals['name']):
                    # neither moved nor renamed: let the plain write below handle it
                    ids2.append(fbro.id)
                    continue
                fnode = nctx.get_file_node(cr, fbro)
                res = fnode.move_to(cr, dnode or fnode.parent, vals.get('name', fbro.name), fbro, dbro, True)
                if isinstance(res, dict):
                    vals2 = vals.copy()
                    vals2.update(res)
                    wid = res.get('id', fbro.id)
                    result = super(document_file, self).write(cr, uid, wid, vals2, context=context)
                    # TODO: how to handle/merge several results?
                elif res is True:
                    ids2.append(fbro.id)
            ids = ids2

        if 'file_size' in vals: # only write that field using direct SQL calls
            del vals['file_size']
        if ids:
            result = super(document_file, self).write(cr, uid, ids, vals, context=context)
        return result
    def create(self, cr, uid, vals, context=None):
        if context is None:
            context = {}
        vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
        if not vals['parent_id']:
            vals['parent_id'] = self.pool.get('document.directory')._get_root_directory(cr, uid, context)
        if not vals.get('res_id', False) and context.get('default_res_id', False):
            vals['res_id'] = context.get('default_res_id', False)
        if not vals.get('res_model', False) and context.get('default_res_model', False):
            vals['res_model'] = context.get('default_res_model', False)
        if vals.get('res_id', False) and vals.get('res_model', False) \
                and not vals.get('partner_id', False):
            vals['partner_id'] = self.__get_partner_id(cr, uid, \
                vals['res_model'], vals['res_id'], context)

        datas = None
        if vals.get('link', False):
            datas = base64.encodestring(urllib.urlopen(vals['link']).read())
        else:
            datas = vals.get('datas', False)

        if datas:
            vals['file_size'] = len(datas)
        else:
            if vals.get('file_size'):
                del vals['file_size']

        result = self._check_duplication(cr, uid, vals)
        if not result:
            # a file with the same name already exists for this resource: overwrite its content
            domain = [
                ('res_id', '=', vals['res_id']),
                ('res_model', '=', vals['res_model']),
                ('datas_fname', '=', vals['datas_fname']),
            ]
            attach_ids = self.search(cr, uid, domain, context=context)
            super(document_file, self).write(cr, uid, attach_ids,
                                             {'datas': vals['datas']},
                                             context=context)
            result = attach_ids[0]
        else:
            #raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
            result = super(document_file, self).create(cr, uid, vals, context)
        return result
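    # Illustrative call (hypothetical ids and payload), a sketch of how the context
    # defaults handled above are consumed; not part of the original module:
    #   self.pool.get('ir.attachment').create(cr, uid,
    #       {'name': 'spec.pdf', 'datas_fname': 'spec.pdf',
    #        'datas': base64.encodestring(file_content)},
    #       context={'default_res_model': 'res.partner', 'default_res_id': partner_id})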
    def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
        """ A helper to retrieve the associated partner from any res_model+id
            It is a hack that will try to discover if the mentioned record is
            clearly associated with a partner record.
        """
        obj_model = self.pool.get(res_model)
        if obj_model._name == 'res.partner':
            return res_id
        elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
            bro = obj_model.browse(cr, uid, res_id, context=context)
            return bro.partner_id.id
        elif 'address_id' in obj_model._columns and obj_model._columns['address_id']._obj == 'res.partner.address':
            bro = obj_model.browse(cr, uid, res_id, context=context)
            return bro.address_id.partner_id.id
        return False
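    # Illustrative outcome (assuming a model such as sale.order, whose partner_id
    # column points to res.partner): __get_partner_id(cr, uid, 'sale.order', so_id)
    # would return that order's partner_id.id; models with no such link yield False.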
    def unlink(self, cr, uid, ids, context=None):
        stor = self.pool.get('document.storage')
        unres = []
        # We have to do the unlink in 2 stages: prepare a list of actual
        # files to be unlinked, update the db (safer to do first, can be
        # rolled back) and then unlink the files. The list wouldn't exist
        # after we discard the objects.
        ids = self.search(cr, uid, [('id', 'in', ids)])
        for f in self.browse(cr, uid, ids, context=context):
            # TODO: update the node cache
            par = f.parent_id
            storage_id = None
            while par:
                if par.storage_id:
                    storage_id = par.storage_id
                    break
                par = par.parent_id
            #assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail
            if storage_id:
                r = stor.prepare_unlink(cr, uid, storage_id, f)
                if r:
                    unres.append(r)
            else:
                logging.getLogger('document').warning("Unlinking attachment #%s %s that has no storage",
                                                      f.id, f.name)
        res = super(document_file, self).unlink(cr, uid, ids, context)
        stor.do_unlink(cr, uid, unres)
        return res

document_file()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: