1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
from osv import osv, fields
#from psycopg2 import Binary
#from tools import config
from tools.translate import _

# Module-level logger, named after this module.
# NOTE(review): `logging`, `os` and `tools` are referenced below but their
# import statements are not visible in this excerpt — confirm they are
# imported near the top of the full file.
_logger = logging.getLogger(__name__)

# Root directory of the document filestore: the `document_path` server
# config option, falling back to <root_path>/filestore.
DMS_ROOT_PATH = tools.config.get('document_path', os.path.join(tools.config['root_path'], 'filestore'))
class document_file(osv.osv):
    """Extension of ``ir.attachment`` for the ``document`` module.

    Turns plain attachments into managed documents: every attachment gets a
    mandatory parent ``document.directory``, its binary payload is exposed
    through the node abstraction (``nodes.node_file``), and create/write/
    unlink are routed through the document storage layer.

    NOTE(review): this excerpt is incomplete — several statements, guard
    conditions and ``return``s present in the full source are elided here.
    Hedged notes below mark the visible gaps.
    """
    _inherit = 'ir.attachment'

    def _attach_parent_id(self, cr, uid, ids=None, context=None):
        """Migrate ir.attachments to the document module.

        When the 'document' module is loaded on a db that has had plain
        attachments, they will need to be attached to some parent folder,
        and be converted from base64-in-bytea to raw-in-bytea format.
        This function performs the internal migration, once and forever,
        for these attachments. It cannot be done through the nominal ORM
        maintenance code, because the root folder is only created after
        the document_data.xml file is loaded.
        It also establishes the parent_id NOT NULL constraint that
        ir.attachment should have had (but which would have failed if plain
        attachments still contained NULL parents).
        It also updates the File Size for the previously created attachments.
        """
        # Root folder that will adopt all pre-existing, parentless attachments.
        parent_id = self.pool.get('document.directory')._get_root_directory(cr,uid)
        # NOTE(review): the guards around the next two statements are not
        # visible in this excerpt — presumably "if not parent_id:" before the
        # warning and "if ids is not None:" before the raise; confirm against
        # the full source.
        _logger.warning("at _attach_parent_id(), still not able to set the parent!")
        raise NotImplementedError("Ids are just there by convention, please do not use it.")
        # Attach every orphan attachment to the root folder and convert its
        # payload from base64 text to raw bytes, in one SQL pass.
        cr.execute("UPDATE ir_attachment " \
                   "SET parent_id = %s, db_datas = decode(encode(db_datas,'escape'), 'base64') " \
                   "WHERE parent_id IS NULL", (parent_id,))
        # Backfill file_size for binary attachments created before this module.
        cr.execute("UPDATE ir_attachment SET file_size=length(db_datas) WHERE file_size = 0 and type = 'binary'")
        # From now on, every attachment must belong to a directory.
        cr.execute("ALTER TABLE ir_attachment ALTER parent_id SET NOT NULL")

    def _get_filestore(self, cr):
        # Per-database filestore directory: <document_path>/<dbname>.
        return os.path.join(DMS_ROOT_PATH, cr.dbname)

    def _data_get(self, cr, uid, ids, name, arg, context=None):
        # Function-field getter for 'datas': read each attachment's content
        # through its storage node and return it base64-encoded (or just its
        # length when the caller only asked for 'bin_size').
        # NOTE(review): the result-dict initialisation, the loop over `fbrl`
        # binding `fbro`, the bin_size branch and the final return are not
        # visible in this excerpt.
        fbrl = self.browse(cr, uid, ids, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        # nctx will /not/ inherit the caller's context. Most of
        # it would be useless, anyway (like active_id, active_model,
        # bin_size etc.)
        bin_size = context.get('bin_size', False)
        fnode = nodes.node_file(None, None, nctx, fbro)
        data = fnode.get_data(cr, fbro)
        # NOTE(review): base64.encodestring is deprecated in later Pythons
        # (use encodebytes); kept as-is for this Python 2 codebase.
        result[fbro.id] = base64.encodestring(data or '')
        result[fbro.id] = fnode.get_data_len(cr, fbro)

    # This code can be improved
    def _data_set(self, cr, uid, id, name, value, arg, context=None):
        # Function-field setter for 'datas': decode the base64 payload and
        # hand it to the storage node, which persists it in the configured
        # storage (db or filestore).
        # NOTE(review): the "no value" early-exit and the final return are
        # not visible in this excerpt.
        fbro = self.browse(cr, uid, id, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        fnode = nodes.node_file(None, None, nctx, fbro)
        res = fnode.set_data(cr, base64.decodestring(value), fbro)

        # NOTE(review): the "_columns = {" opener is not visible in this
        # excerpt; the entries below are the column declarations.
        # Columns from ir.attachment:
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        'res_model': fields.char('Attached Model', size=64, readonly=True, change_default=True),
        'res_id': fields.integer('Attached ID', readonly=True),

        # If ir.attachment contained any data before document is installed, preserve
        # the data, don't drop the column!
        'db_datas': fields.binary('Data', oldname='datas'),
        'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),

        # Fields of document:
        'user_id': fields.many2one('res.users', 'Owner', select=1),
        # 'group_ids': fields.many2many('res.groups', 'document_group_rel', 'item_id', 'group_id', 'Groups'),
        # the directory id now is mandatory. It can still be computed automatically.
        'parent_id': fields.many2one('document.directory', 'Directory', select=1, required=True, change_default=True),
        'index_content': fields.text('Indexed Content'),
        'partner_id':fields.many2one('res.partner', 'Partner', select=1),
        'file_size': fields.integer('File Size', required=True),
        'file_type': fields.char('Content Type', size=128),

        # fields used for file storage
        'store_fname': fields.char('Stored Filename', size=200),

    def __get_def_directory(self, cr, uid, context=None):
        # Default for 'parent_id': the root document directory.
        dirobj = self.pool.get('document.directory')
        return dirobj._get_root_directory(cr, uid, context)

        # NOTE(review): the "_defaults = {" opener is not visible in this
        # excerpt; the entries below are the default-value declarations.
        'user_id': lambda self, cr, uid, ctx:uid,
        'parent_id': __get_def_directory,
        'file_size': lambda self, cr, uid, ctx:0,

    # filename_uniq is not possible in pure SQL

    def onchange_file(self, cr, uid, ids, datas_fname=False, context=None):
        # Propagate the uploaded file name to the record name.
        # NOTE(review): the initialisation of `res` and the final return are
        # not visible in this excerpt.
        res['value'].update({'name': datas_fname})

    def check(self, cr, uid, ids, mode, context=None, values=None):
        """Check access wrt. res_model, relaxing the ir.attachment rule.

        With 'document' installed, everybody will have access to attachments
        of any resources they can *read* — hence the forced mode='read'.
        """
        return super(document_file, self).check(cr, uid, ids, mode='read',
                                                context=context, values=values)

    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        # Grab ids, bypassing 'count'
        ids = super(document_file, self).search(cr, uid, args, offset=offset,
                                                limit=limit, order=order,
                                                context=context, count=False)
        # NOTE(review): an "if not ids:" guard presumably precedes this early
        # return in the full source — confirm.
        return 0 if count else []
        # Filter out documents that are in directories that the user is not allowed to read.
        # Must use pure SQL to avoid access rules exceptions (we want to remove the records,
        # not fail), and the records have been filtered in parent's search() anyway.
        cr.execute('SELECT id, parent_id from "%s" WHERE id in %%s' % self._table, (tuple(ids),))
        doc_pairs = cr.fetchall()
        # zip(*pairs)[1] gathers the second column (parent ids); Python 2 idiom.
        parent_ids = set(zip(*doc_pairs)[1])
        visible_parent_ids = self.pool.get('document.directory').search(cr, uid, [('id', 'in', list(parent_ids))])
        disallowed_parents = parent_ids.difference(visible_parent_ids)
        for doc_id, parent_id in doc_pairs:
            # NOTE(review): the body removing disallowed docs from `ids`
            # (e.g. ids.remove(doc_id)) is not visible in this excerpt.
            if parent_id in disallowed_parents:
        return len(ids) if count else ids

    def copy(self, cr, uid, id, default=None, context=None):
        # Duplicate the attachment, suffixing "(copy)" to the name unless a
        # new name is supplied in `default`.
        # NOTE(review): the "default = {}" initialisation when default is
        # None is not visible in this excerpt.
        if 'name' not in default:
            name = self.read(cr, uid, [id], ['name'])[0]['name']
            default.update(name=_("%s (copy)") % (name))
        return super(document_file, self).copy(cr, uid, id, default, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        # NOTE(review): the body normalising a scalar id into a list, and
        # several guards/returns of this method, are not visible in this
        # excerpt — the dangling `if`s below reflect that.
        if not isinstance(ids, list):
        res = self.search(cr, uid, [('id', 'in', ids)])
        # if nodes call this write(), they must skip the code below
        from_node = context and context.get('__from_node', False)
        if (('parent_id' in vals) or ('name' in vals)) and not from_node:
            # perhaps this file is renaming or changing directory
            nctx = nodes.get_node_context(cr,uid,context={})
            dirobj = self.pool.get('document.directory')
            if 'parent_id' in vals:
                # Target directory node, for a possible move.
                dbro = dirobj.browse(cr, uid, vals['parent_id'], context=context)
                dnode = nctx.get_dir_node(cr, dbro)
            for fbro in self.browse(cr, uid, ids, context=context):
                # Skip files whose name/directory are not actually changing.
                if ('parent_id' not in vals or fbro.parent_id.id == vals['parent_id']) \
                    and ('name' not in vals or fbro.name == vals['name']):
                # Let the storage node perform the rename/move.
                fnode = nctx.get_file_node(cr, fbro)
                res = fnode.move_to(cr, dnode or fnode.parent, vals.get('name', fbro.name), fbro, dbro, True)
                if isinstance(res, dict):
                # NOTE(review): `vals2` is assigned in lines not visible here.
                wid = res.get('id', fbro.id)
                result = super(document_file,self).write(cr,uid,wid,vals2,context=context)
                # TODO: how to handle/merge several results?
        if 'file_size' in vals: # only write that field using direct SQL calls
            del vals['file_size']
        result = super(document_file,self).write(cr, uid, ids, vals, context=context)

    def create(self, cr, uid, vals, context=None):
        # Fill in defaults the UI/context may carry: parent directory,
        # attached resource, and the partner inferred from that resource.
        # NOTE(review): the "context = {}" guard and the exception handling
        # around the link download are not visible in this excerpt.
        vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
        if not vals['parent_id']:
            vals['parent_id'] = self.pool.get('document.directory')._get_root_directory(cr,uid, context)
        if not vals.get('res_id', False) and context.get('default_res_id', False):
            vals['res_id'] = context.get('default_res_id', False)
        if not vals.get('res_model', False) and context.get('default_res_model', False):
            vals['res_model'] = context.get('default_res_model', False)
        if vals.get('res_id', False) and vals.get('res_model', False) \
                and not vals.get('partner_id', False):
            vals['partner_id'] = self.__get_partner_id(cr, uid, \
                vals['res_model'], vals['res_id'], context)
        if vals.get('link', False) :
            # Fetch linked content and store it like an uploaded file.
            datas = base64.encodestring(urllib.urlopen(vals['link']).read())
        datas = vals.get('datas', False)
        vals['file_size'] = len(datas)
        if vals.get('file_size'):
            del vals['file_size']
        return super(document_file, self).create(cr, uid, vals, context)

    def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
        """ A helper to retrieve the associated partner from any res_model+id.

        It is a hack that will try to discover if the mentioned record is
        clearly associated with a partner record.
        """
        # NOTE(review): the res.partner early return and the fall-through
        # "return False" are not visible in this excerpt.
        obj_model = self.pool.get(res_model)
        if obj_model._name == 'res.partner':
        elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
            bro = obj_model.browse(cr, uid, res_id, context=context)
            return bro.partner_id.id

    def unlink(self, cr, uid, ids, context=None):
        stor = self.pool.get('document.storage')
        # We have to do the unlink in 2 stages: prepare a list of actual
        # files to be unlinked, update the db (safer to do first, can be
        # rolled back) and then unlink the files. The list wouldn't exist
        # after we discard the objects
        ids = self.search(cr, uid, [('id','in',ids)])
        for f in self.browse(cr, uid, ids, context=context):
            # TODO: update the node cache
            # NOTE(review): `par` and `unres` are bound in lines not visible
            # here; `self.loggerdoc` looks like a bug (no such attribute is
            # defined — probably meant `_logger`). Confirm against full source.
            storage_id = par.storage_id
            #assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail
            r = stor.prepare_unlink(cr, uid, storage_id, f)
            self.loggerdoc.warning("Unlinking attachment #%s %s that has no storage.",
        res = super(document_file, self).unlink(cr, uid, ids, context)
        # Second stage: remove the physical files now that the db rows are gone.
        stor.do_unlink(cr, uid, unres)
317 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: