1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
23 from osv import osv, fields
26 # from psycopg2 import Binary
27 #from tools import config
29 from tools.translate import _
# Root directory of the DMS file store: the 'document_path' server option,
# falling back to <root_path>/filestore.
# NOTE(review): relies on `os` and `tools` being imported earlier in the file;
# those import lines are not visible in this extract — confirm.
33 DMS_ROOT_PATH = tools.config.get('document_path', os.path.join(tools.config['root_path'], 'filestore'))
# Extend the core ir.attachment model into a DMS "file": every attachment is
# a node of the document hierarchy with a mandatory parent directory.
35 class document_file(osv.osv):
36 _inherit = 'ir.attachment'
# Display records by their file name instead of the attachment description.
37 _rec_name = 'datas_fname'
39 def _attach_parent_id(self, cr, uid, ids=None, context=None):
40 """Migrate ir.attachments to the document module.
42 When the 'document' module is loaded on a db that has had plain attachments,
43 they will need to be attached to some parent folder, and be converted from
44 base64-in-bytea to raw-in-bytea format.
45 This function performs the internal migration, once and forever, for these
46 attachments. It cannot be done through the nominal ORM maintenance code,
47 because the root folder is only created after the document_data.xml file
49 It also establishes the parent_id NOT NULL constraint that ir.attachment
50 should have had (but would have failed if plain attachments contained null
"""
# NOTE(review): several original lines are missing from this extract (48,
# 51-53, 55, 57-59, 61, 66-67) — among them the guards that presumably (a)
# emit the warning below only when no root directory exists yet and (b) raise
# NotImplementedError only when a non-None `ids` is passed. Do not read the
# visible statements as one straight-line sequence; confirm against the
# original file.
# Root DMS directory that adopts every orphaned attachment.
54 parent_id = self.pool.get('document.directory')._get_root_directory(cr,uid)
# Reached when the root directory could not be resolved (guard line missing
# from this extract).
56 logging.getLogger('document').warning("at _attach_parent_id(), still not able to set the parent!")
# `ids` exists only for signature symmetry with other ORM hooks; passing a
# value is unsupported.
60 raise NotImplementedError("Ids is just there by convention! Don't use it yet, please.")
# Adopt all parentless attachments under the root and convert their payload
# in-place: decode(encode(db_datas,'escape'),'base64') turns base64 text
# stored in the bytea column into raw bytes.
62 cr.execute("UPDATE ir_attachment " \
63 "SET parent_id = %s, db_datas = decode(encode(db_datas,'escape'), 'base64') " \
64 "WHERE parent_id IS NULL", (parent_id,))
# From now on the database itself enforces that every attachment has a parent.
65 cr.execute("ALTER TABLE ir_attachment ALTER parent_id SET NOT NULL")
def _get_filestore(self, cr):
    """Return the on-disk filestore directory for the cursor's database.

    Each database keeps its documents in its own sub-directory of the
    configured DMS root path.
    """
    db_subdir = cr.dbname
    return os.path.join(DMS_ROOT_PATH, db_subdir)
# Getter of the 'datas' function field: fetch each attachment's content via
# its DMS node (the node layer decides where the bytes actually live).
# NOTE(review): this extract is missing lines 72-73, 78-79, 81, 83, 86 and
# 88-91 — presumably the `result = {}` initialisation, the context-None guard,
# the `for fbro in fbrl:` loop header, the if/else switching on bin_size, and
# the final `return result`. Confirm against the original file.
71 def _data_get(self, cr, uid, ids, name, arg, context=None):
74 fbrl = self.browse(cr, uid, ids, context=context)
75 nctx = nodes.get_node_context(cr, uid, context={})
76 # nctx will /not/ inherit the caller's context. Most of
77 # it would be useless, anyway (like active_id, active_model,
# When the client only needs sizes (list views), skip the payload entirely.
80 bin_size = context.get('bin_size', False)
# Wrap the browse record in a DMS file node.
82 fnode = nodes.node_file(None, None, nctx, fbro)
# Full-content branch: the ORM expects binary fields base64-encoded.
84 data = fnode.get_data(cr, fbro)
85 result[fbro.id] = base64.encodestring(data or '')
# bin_size branch: return only the content length, not the data itself.
87 result[fbro.id] = fnode.get_data_len(cr, fbro)
92 # This code can be improved
# Inverse of _data_get: store new content written through the 'datas'
# function field.
# NOTE(review): lines 95-96 and 101-103 are missing from this extract —
# presumably an early `if not value: return True` guard and the final
# `return res`. Confirm against the original file.
94 def _data_set(self, cr, uid, id, name, value, arg, context=None):
97 fbro = self.browse(cr, uid, id, context=context)
# Fresh node context on purpose; the caller's context keys are irrelevant here.
98 nctx = nodes.get_node_context(cr, uid, context={})
99 fnode = nodes.node_file(None, None, nctx, fbro)
# The payload arrives base64-encoded from the client; hand raw bytes to the
# storage node.
100 res = fnode.set_data(cr, base64.decodestring(value), fbro)
# NOTE(review): these are entries of the `_columns` dict; the opening
# `_columns = {` line and the closing `}` are missing from this extract.
104 # Columns from ir.attachment:
# Audit fields: populated by the ORM, never user-editable.
105 'create_date': fields.datetime('Date Created', readonly=True),
106 'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
107 'write_date': fields.datetime('Date Modified', readonly=True),
108 'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
# Link back to the record this file is attached to (model name + numeric id).
109 'res_model': fields.char('Attached Model', size=64, readonly=True, change_default=True),
110 'res_id': fields.integer('Attached ID', readonly=True),
112 # If ir.attachment contained any data before document is installed, preserve
113 # the data, don't drop the column!
114 'db_datas': fields.binary('Data', oldname='datas'),
# 'datas' is computed: reads/writes go through the DMS node layer
# (_data_get/_data_set); nodrop keeps the legacy column alive.
115 'datas': fields.function(_data_get, method=True, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
117 # Fields of document:
118 'user_id': fields.many2one('res.users', 'Owner', select=1),
119 # 'group_ids': fields.many2many('res.groups', 'document_group_rel', 'item_id', 'group_id', 'Groups'),
120 # the directory id now is mandatory. It can still be computed automatically.
121 'parent_id': fields.many2one('document.directory', 'Directory', select=1, required=True, change_default=True),
# Extracted text content, used for searching inside documents.
122 'index_content': fields.text('Indexed Content'),
123 'partner_id':fields.many2one('res.partner', 'Partner', select=1),
124 'file_size': fields.integer('File Size', required=True),
125 'file_type': fields.char('Content Type', size=128),
127 # fields used for file storage
# Name of the physical file when the storage backend keeps data on disk
# (presumably relative to the filestore root — confirm against
# document.storage).
128 'store_fname': fields.char('Stored Filename', size=200),
# Newest documents first by default.
130 _order = "create_date desc"
def __get_def_directory(self, cr, uid, context=None):
    """Default-value helper: return the root directory of the DMS hierarchy."""
    # Delegate to document.directory so there is a single source of truth for
    # what the root folder is.
    return self.pool.get('document.directory')._get_root_directory(cr, uid, context)
# NOTE(review): entries of the `_defaults` dict; the opening `_defaults = {`
# line and the closing `}` are missing from this extract.
# New files are owned by the user creating them.
137 'user_id': lambda self, cr, uid, ctx:uid,
# Size starts at zero and is maintained when content is written.
138 'file_size': lambda self, cr, uid, ctx:0,
# Files land in the DMS root directory unless a directory is specified.
139 'parent_id': __get_def_directory
142 # filename_uniq is not possible in pure SQL
# Enforce that a file name is unique within (parent directory, attached
# model, attached id). Returns a falsy result when a clash is found (the
# callers raise 'File name must be unique!').
# NOTE(review): lines 149, 151-153, 155, 157, 160-162 and 164-167 are missing
# from this extract — presumably the `op == 'write'` / create branching, the
# per-field fallbacks to the browsed record's values, the `if len(res): return
# False` checks and the final `return True`. Confirm against the original.
# NOTE(review): mutable default `ids=[]` — only iterated here, but fragile;
# `file` also shadows a builtin.
144 def _check_duplication(self, cr, uid, vals, ids=[], op='create'):
145 name = vals.get('name', False)
146 parent_id = vals.get('parent_id', False)
147 res_model = vals.get('res_model', False)
148 res_id = vals.get('res_id', 0)
# Write branch: check each record being written, substituting its current
# values for any key absent from `vals`.
150 for file in self.browse(cr, uid, ids): # FIXME fields_only
154 parent_id = file.parent_id and file.parent_id.id or False
156 res_model = file.res_model and file.res_model or False
158 res_id = file.res_id and file.res_id or 0
# Look for a *different* record with the same identity quadruple.
159 res = self.search(cr, uid, [('id', '<>', file.id), ('name', '=', name), ('parent_id', '=', parent_id), ('res_model', '=', res_model), ('res_id', '=', res_id)])
# Create branch: no record to exclude yet.
163 res = self.search(cr, uid, [('name', '=', name), ('parent_id', '=', parent_id), ('res_id', '=', res_id), ('res_model', '=', res_model)])
168 def check(self, cr, uid, ids, mode, context=None, values=None):
169 """Check access wrt. res_model, relax the rule of ir.attachment parent
171 With 'document' installed, everybody will have access to attachments of
172 any resources they can *read*.
"""
# Deliberately downgrade whatever `mode` was requested to 'read' before
# delegating, so write/unlink on an attachment only requires read access to
# the attached resource.
174 return super(document_file, self).check(cr, uid, ids, mode='read',
175 context=context, values=values)
# Override search to hide documents whose parent directory the user may not
# read, instead of raising an access error.
# NOTE(review): lines 182, 184, 195 and 197-198 are missing from this extract
# — presumably the `if not ids:` guard before the empty return below and the
# `ids.remove(doc_id)` inside the final loop. Confirm against the original.
177 def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
178 # Grab ids, bypassing 'count'
179 ids = super(document_file, self).search(cr, uid, args, offset=offset,
180 limit=limit, order=order,
181 context=context, count=False)
# Empty result short-circuit (its `if not ids:` guard is on a missing line).
183 return 0 if count else []
185 # Filter out documents that are in directories that the user is not allowed to read.
186 # Must use pure SQL to avoid access rules exceptions (we want to remove the records,
187 # not fail), and the records have been filtered in parent's search() anyway.
188 cr.execute('SELECT id, parent_id from "%s" WHERE id in %%s' % self._table, (tuple(ids),))
189 doc_pairs = cr.fetchall()
# Python 2 idiom: zip() returns a list, so [1] picks the parent_id column.
190 parent_ids = set(zip(*doc_pairs)[1])
# This ORM search applies the user's access rules on document.directory.
191 visible_parent_ids = self.pool.get('document.directory').search(cr, uid, [('id', 'in', list(parent_ids))])
192 disallowed_parents = parent_ids.difference(visible_parent_ids)
# Drop every document whose directory is not visible to the user.
193 for doc_id, parent_id in doc_pairs:
194 if parent_id in disallowed_parents:
# Re-apply the `count` contract on the filtered result.
196 return len(ids) if count else ids
# Duplicate a document, suffixing the name with "(copy)" so the
# name-uniqueness check in _check_duplication is not violated.
# NOTE(review): lines 200-201 are missing from this extract — presumably the
# `if not default: default = {}` guard that makes the membership test below
# safe when default is None. Confirm against the original.
199 def copy(self, cr, uid, id, default=None, context=None):
202 if 'name' not in default:
203 name = self.read(cr, uid, [id], ['name'])[0]['name']
# _("(copy)") keeps the suffix translatable.
204 default.update({'name': name + " " + _("(copy)")})
205 return super(document_file, self).copy(cr, uid, id, default, context=context)
# Write override: enforces name uniqueness, routes renames / directory moves
# through the DMS node layer, and forbids direct writes to file_size.
# NOTE(review): this extract is missing lines 208, 210, 212-213, 216,
# 226-229, 233-234, 238-239, 243-247 and 252-254 — presumably the `ids =
# [ids]` normalisation, the `else: dbro = dnode = None` branch, the
# `continue` for untouched records, the construction of `vals2` (vals minus
# the keys the node move already handled), and the final `return result`.
# Confirm against the original file before changing anything here.
207 def write(self, cr, uid, ids, vals, context=None):
# Accept a single id as well as a list.
209 if not isinstance(ids, list):
# Re-search to keep only records the caller may actually see/write.
211 res = self.search(cr, uid, [('id', 'in', ids)])
214 if not self._check_duplication(cr, uid, vals, ids, 'write'):
215 raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
217 # if nodes call this write(), they must skip the code below
# '__from_node' in context marks re-entrant calls coming from the node layer.
218 from_node = context and context.get('__from_node', False)
219 if (('parent_id' in vals) or ('name' in vals)) and not from_node:
220 # perhaps this file is renaming or changing directory
221 nctx = nodes.get_node_context(cr,uid,context={})
222 dirobj = self.pool.get('document.directory')
223 if 'parent_id' in vals:
# Resolve the target directory and its DMS node once, before the loop.
224 dbro = dirobj.browse(cr, uid, vals['parent_id'], context=context)
225 dnode = nctx.get_dir_node(cr, dbro)
230 for fbro in self.browse(cr, uid, ids, context=context):
# Skip records that are not actually moving or being renamed.
231 if ('parent_id' not in vals or fbro.parent_id.id == vals['parent_id']) \
232 and ('name' not in vals or fbro.name == vals['name']):
# Let the storage backend perform the physical move/rename.
235 fnode = nctx.get_file_node(cr, fbro)
236 res = fnode.move_to(cr, dnode or fnode.parent, vals.get('name', fbro.name), fbro, dbro, True)
# move_to may return a dict of extra values (e.g. a new id) to write.
237 if isinstance(res, dict):
240 wid = res.get('id', fbro.id)
# `vals2` is built on a missing line (vals stripped of handled keys).
241 result = super(document_file,self).write(cr,uid,wid,vals2,context=context)
242 # TODO: how to handle/merge several results?
# file_size is maintained internally via direct SQL; ignore caller's value.
248 if 'file_size' in vals: # only write that field using direct SQL calls
249 del vals['file_size']
250 if len(ids) and len(vals):
251 result = super(document_file,self).write(cr, uid, ids, vals, context=context)
# Create override: resolves the parent directory, propagates default
# res_model/res_id from the context, derives the partner, computes file_size
# and enforces name uniqueness.
# NOTE(review): lines 256-257, 269-270, 272, 274, 276-277, 279 and 285-287
# are missing from this extract — presumably the `context = {}` guard, the
# try/except around urlopen, the `if datas:` / else branching around the
# size computation, and the final `return result`. Confirm against the
# original file.
255 def create(self, cr, uid, vals, context=None):
# Context wins over vals for the target directory; fall back to the root.
258 vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
259 if not vals['parent_id']:
260 vals['parent_id'] = self.pool.get('document.directory')._get_root_directory(cr,uid, context)
# Attach to the record designated by the caller's context, if any.
261 if not vals.get('res_id', False) and context.get('default_res_id', False):
262 vals['res_id'] = context.get('default_res_id', False)
263 if not vals.get('res_model', False) and context.get('default_res_model', False):
264 vals['res_model'] = context.get('default_res_model', False)
# Derive the related partner from the attached record when possible.
265 if vals.get('res_id', False) and vals.get('res_model', False) \
266 and not vals.get('partner_id', False):
267 vals['partner_id'] = self.__get_partner_id(cr, uid, \
268 vals['res_model'], vals['res_id'], context)
271 if vals.get('link', False) :
# SECURITY(review): fetches an arbitrary user-supplied URL server-side
# (SSRF risk) with no scheme allow-list or timeout — review before reuse.
273 datas = base64.encodestring(urllib.urlopen(vals['link']).read())
275 datas = vals.get('datas', False)
# NOTE(review): length of the *base64* payload, not of the decoded file —
# consistent with how the original module behaved; confirm before "fixing".
278 vals['file_size'] = len(datas)
# No content: a caller-supplied size would be meaningless, drop it.
280 if vals.get('file_size'):
281 del vals['file_size']
282 if not self._check_duplication(cr, uid, vals):
283 raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
284 result = super(document_file, self).create(cr, uid, vals, context)
288 def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
289 """ A helper to retrieve the associated partner from any res_model+id
290 It is a hack that will try to discover if the mentioned record is
291 clearly associated with a partner record.
"""
# NOTE(review): lines 295 and 302-303 are missing from this extract —
# presumably `return res_id` for the res.partner branch and a final
# `return False` fallback. Confirm against the original file.
293 obj_model = self.pool.get(res_model)
# The record *is* a partner: its own id is the answer (return on missing line).
294 if obj_model._name == 'res.partner':
# The model has a many2one 'partner_id' to res.partner: follow it.
296 elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
297 bro = obj_model.browse(cr, uid, res_id, context=context)
298 return bro.partner_id.id
# Otherwise try an 'address_id' pointing at a partner address and hop to
# its owning partner.
299 elif 'address_id' in obj_model._columns and obj_model._columns['address_id']._obj == 'res.partner.address':
300 bro = obj_model.browse(cr, uid, res_id, context=context)
301 return bro.address_id.partner_id.id
# Two-phase delete: collect the physical files to remove, delete the DB rows
# first (rollback-safe), then remove the files from storage.
# NOTE(review): lines 306, 314-317, 319-320, 322, 324-326, 328 and 331+ are
# missing from this extract — presumably the `unres = []` accumulator, the
# walk from f.parent_id up the directory tree to find a storage, the
# `unres.append(r)`, the warning's arguments, and the final `return res`.
# Confirm against the original file.
304 def unlink(self, cr, uid, ids, context=None):
305 stor = self.pool.get('document.storage')
307 # We have to do the unlink in 2 stages: prepare a list of actual
308 # files to be unlinked, update the db (safer to do first, can be
309 # rolled back) and then unlink the files. The list wouldn't exist
310 # after we discard the objects
# Re-search to restrict to records visible to the caller.
311 ids = self.search(cr, uid, [('id','in',ids)])
312 for f in self.browse(cr, uid, ids, context=context):
313 # TODO: update the node cache
# `par` is bound on a missing line (presumably f.parent_id, walked upward
# until a directory with a storage is found).
318 storage_id = par.storage_id
321 #assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail
# Phase 1: ask the storage backend what physical file this row maps to.
323 r = stor.prepare_unlink(cr, uid, storage_id, f)
# Best-effort: a file without storage is logged, not fatal.
327 logging.getLogger('document').warning("Unlinking attachment #%s %s that has no storage",
# Phase 2: remove the database rows...
329 res = super(document_file, self).unlink(cr, uid, ids, context)
# ...then physically delete the collected files.
330 stor.do_unlink(cr, uid, unres)