# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hashlib
import itertools
import logging
import os
import re

from openerp import tools
from openerp.exceptions import AccessError
from openerp.osv import fields, osv
from openerp import SUPERUSER_ID
from openerp.osv.orm import except_orm
from openerp.tools.translate import _

_logger = logging.getLogger(__name__)

class ir_attachment(osv.osv):
    """Attachments are used to link binary files or urls to any openerp document.

    External attachment storage
    ---------------------------

    The 'data' function field (_data_get, _data_set) is implemented using
    _file_read, _file_write and _file_delete, which can be overridden to
    implement other storage engines. Such methods should check for other
    location pseudo-uris (example: hdfs://hadoopserver).

    The default implementation is the file:dirname location that stores files
    on the local filesystem using names based on their sha1 hash.
    """
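
    # The hook described above can be illustrated with a minimal sketch: an
    # alternate storage engine inherits ir.attachment and branches on the
    # configured location in _file_read/_file_write. The 's3://' location
    # string and the _s3_get/_s3_put helpers are hypothetical, used here only
    # for illustration; they are not part of this module:
    #
    #   class s3_attachment(osv.osv):
    #       _inherit = 'ir.attachment'
    #
    #       def _file_read(self, cr, uid, fname, bin_size=False):
    #           location = self._storage(cr, uid)
    #           if location.startswith('s3://'):
    #               return self._s3_get(location, fname)  # hypothetical helper
    #           return super(s3_attachment, self)._file_read(cr, uid, fname, bin_size)
    #
    #       def _file_write(self, cr, uid, value):
    #           location = self._storage(cr, uid)
    #           if location.startswith('s3://'):
    #               return self._s3_put(location, value)  # hypothetical helper
    #           return super(s3_attachment, self)._file_write(cr, uid, value)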

    def _name_get_resname(self, cr, uid, ids, object, method, context):
        data = {}
        for attachment in self.browse(cr, uid, ids, context=context):
            model_object = attachment.res_model
            res_id = attachment.res_id
            if model_object and res_id:
                model_pool = self.pool[model_object]
                res = model_pool.name_get(cr, uid, [res_id], context)
                res_name = res and res[0][1] or None
                if res_name:
                    field = self._columns.get('res_name', False)
                    if field and len(res_name) > field.size:
                        # truncate to the column size, leaving room for the ellipsis
                        res_name = res_name[:field.size - 3] + '...'
                data[attachment.id] = res_name or False
            else:
                data[attachment.id] = False
        return data

    def _storage(self, cr, uid, context=None):
        return self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'ir_attachment.location', 'file')

    @tools.ormcache(skiparg=3)
    def _filestore(self, cr, uid, context=None):
        return tools.config.filestore(cr.dbname)

    def force_storage(self, cr, uid, context=None):
        """Force all attachments to be stored in the currently configured storage"""
        if not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
            raise AccessError(_('Only administrators can execute this action.'))

        # select the attachments that are not yet in the configured location
        location = self._storage(cr, uid, context)
        domain = {
            'db': [('store_fname', '!=', False)],
            'file': [('db_datas', '!=', False)],
        }[location]

        ids = self.search(cr, uid, domain, context=context)
        for attach in self.browse(cr, uid, ids, context=context):
            # rewriting 'datas' goes through _data_set, which stores the
            # payload in the currently configured location
            attach.write({'datas': attach.datas})
        return True
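
    # A usage sketch, assuming 'registry' is the model registry for the
    # current database and 'cr'/'uid' are an open cursor and an admin user:
    #
    #   registry['ir.config_parameter'].set_param(
    #       cr, SUPERUSER_ID, 'ir_attachment.location', 'db')
    #   registry['ir.attachment'].force_storage(cr, uid)
    #
    # moves every file-stored attachment into the database (and vice versa
    # with 'file').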

    # 'data' field implementation
    def _full_path(self, cr, uid, path):
        # sanitize the path: dropping dots and leading separators prevents
        # the stored filename from escaping the filestore directory
        path = re.sub('[.]', '', path)
        path = path.strip('/\\')
        return os.path.join(self._filestore(cr, uid), path)
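
    # Worked example of the sanitization above: for a path of
    # '../../etc/passwd', removing the dots gives '//etc/passwd' and the
    # strip gives 'etc/passwd', so the joined result can never escape the
    # filestore directory.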

    def _get_path(self, cr, uid, bin_data):
        sha = hashlib.sha1(bin_data).hexdigest()

        # retro compatibility with the former 3-char directory layout
        fname = sha[:3] + '/' + sha
        full_path = self._full_path(cr, uid, fname)
        if os.path.isfile(full_path):
            return fname, full_path        # keep existing path

        # scatter files across 256 dirs
        # we use '/' in the db (even on windows)
        fname = sha[:2] + '/' + sha
        full_path = self._full_path(cr, uid, fname)
        dirname = os.path.dirname(full_path)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        return fname, full_path
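
    # Example layout: a payload whose sha1 digest is
    # 'da39a3ee5e6b4b0d3255bfef95601890afd80709' ends up at
    #   <filestore>/da/da39a3ee5e6b4b0d3255bfef95601890afd80709
    # unless it already exists at the legacy 3-char path
    #   <filestore>/da3/da39a3ee5e6b4b0d3255bfef95601890afd80709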

    def _file_read(self, cr, uid, fname, bin_size=False):
        full_path = self._full_path(cr, uid, fname)
        r = ''
        try:
            if bin_size:
                r = os.path.getsize(full_path)
            else:
                r = open(full_path, 'rb').read().encode('base64')
        except IOError:
            _logger.exception("_file_read reading %s", full_path)
        return r

    def _file_write(self, cr, uid, value):
        bin_value = value.decode('base64')
        fname, full_path = self._get_path(cr, uid, bin_value)
        if not os.path.exists(full_path):
            try:
                with open(full_path, 'wb') as fp:
                    fp.write(bin_value)
            except IOError:
                _logger.exception("_file_write writing %s", full_path)
        return fname

    def _file_delete(self, cr, uid, fname):
        # only unlink when no attachment row still references this file
        count = self.search_count(cr, SUPERUSER_ID, [('store_fname', '=', fname)])
        full_path = self._full_path(cr, uid, fname)
        if not count and os.path.exists(full_path):
            try:
                os.unlink(full_path)
            except OSError:
                _logger.exception("_file_delete could not unlink %s", full_path)
            except IOError:
                # Harmless and needed for race conditions
                _logger.exception("_file_delete could not unlink %s", full_path)
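
    # Because files are named after the sha1 of their content, identical
    # payloads share a single file on disk; the reference count above is
    # what keeps deleting one of several identical attachments safe.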

    def _data_get(self, cr, uid, ids, name, arg, context=None):
        if context is None:
            context = {}
        result = {}
        bin_size = context.get('bin_size')
        for attach in self.browse(cr, uid, ids, context=context):
            if attach.store_fname:
                result[attach.id] = self._file_read(cr, uid, attach.store_fname, bin_size)
            else:
                result[attach.id] = attach.db_datas
        return result
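
    # e.g. reading 'datas' with context={'bin_size': True} returns the result
    # of os.path.getsize() for file-stored attachments instead of the base64
    # payload, so callers can display a size without loading the whole file.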

    def _data_set(self, cr, uid, id, name, value, arg, context=None):
        # We don't handle setting data to null
        if not value:
            return True
        if context is None:
            context = {}
        location = self._storage(cr, uid, context)
        file_size = len(value.decode('base64'))
        attach = self.browse(cr, uid, id, context=context)
        fname_to_delete = attach.store_fname
        if location != 'db':
            fname = self._file_write(cr, uid, value)
            # use SUPERUSER_ID as the user probably does not have write
            # access, e.g. when this is triggered during create()
            super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'store_fname': fname, 'file_size': file_size, 'db_datas': False}, context=context)
        else:
            super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'db_datas': value, 'file_size': file_size, 'store_fname': False}, context=context)

        # After de-referencing the file in the database, check whether we need
        # to garbage-collect it on the filesystem
        if fname_to_delete:
            self._file_delete(cr, uid, fname_to_delete)
        return True

    _name = 'ir.attachment'
    _columns = {
        'name': fields.char('Attachment Name', required=True),
        'datas_fname': fields.char('File Name'),
        'description': fields.text('Description'),
        'res_name': fields.function(_name_get_resname, type='char', string='Resource Name', store=True),
        'res_model': fields.char('Resource Model', readonly=True, help="The database object this attachment will be attached to"),
        'res_id': fields.integer('Resource ID', readonly=True, help="The record id this is attached to"),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Owner', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', change_default=True),
        'type': fields.selection([('url', 'URL'), ('binary', 'Binary')],
                'Type', help="Binary File or URL", required=True, change_default=True),
        'url': fields.char('Url', size=1024),
        # we keep the legacy field names for backward compatibility with the document module
        'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
        'store_fname': fields.char('Stored Filename'),
        'db_datas': fields.binary('Database Data'),
        'file_size': fields.integer('File Size'),
    }

    _defaults = {
        'type': 'binary',
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c),
    }

    def _auto_init(self, cr, context=None):
        super(ir_attachment, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
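
    # This composite index presumably exists to speed up fetching all
    # attachments of a given record, i.e. queries filtering on
    # (res_model, res_id).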

    def check(self, cr, uid, ids, mode, context=None, values=None):
        """Restricts access to an ir.attachment, according to the model of the
        record it refers to. In the 'document' module, this is overridden to
        relax the hard rule, since more complex ones apply there.
        """
        res_ids = {}
        require_employee = False
        if ids:
            if isinstance(ids, (int, long)):
                ids = [ids]
            cr.execute('SELECT DISTINCT res_model, res_id FROM ir_attachment WHERE id = ANY (%s)', (ids,))
            for rmod, rid in cr.fetchall():
                if not (rmod and rid):
                    require_employee = True
                    continue
                res_ids.setdefault(rmod, set()).add(rid)
        if values:
            if values.get('res_model') and values.get('res_id'):
                res_ids.setdefault(values['res_model'], set()).add(values['res_id'])

        ima = self.pool.get('ir.model.access')
        for model, mids in res_ids.items():
            # ignore attachments that are not attached to a resource anymore
            # when checking access rights (resource was deleted but the
            # attachment was not)
            if not self.pool.get(model):
                require_employee = True
                continue
            existing_ids = self.pool[model].exists(cr, uid, mids)
            if len(existing_ids) != len(mids):
                require_employee = True
            ima.check(cr, uid, model, mode)
            self.pool[model].check_access_rule(cr, uid, existing_ids, mode, context=context)

        if require_employee:
            if not self.pool['res.users'].has_group(cr, uid, 'base.group_user'):
                raise except_orm(_('Access Denied'), _("Sorry, you are not allowed to access this document."))
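
    # Concretely: reading an attachment linked to a res.partner record
    # requires 'read' access on res.partner and that record passing its
    # ir.rule filters, while attachments with a missing or dangling resource
    # are only accessible to members of base.group_user.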

    def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        ids = super(ir_attachment, self)._search(cr, uid, args, offset=offset,
                                                 limit=limit, order=order,
                                                 context=context, count=False,
                                                 access_rights_uid=access_rights_uid)
        if not ids:
            return 0 if count else []

        # Work with a set, as list.remove() is prohibitive for large lists of documents
        # (takes 20+ seconds on a db with 100k docs during search_count()!)
        orig_ids = ids
        ids = set(ids)

        # For attachments, the permissions of the document they are attached to
        # apply, so we must remove attachments for which the user cannot access
        # the linked document.
        # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
        # and the permissions are checked in super() and below anyway.
        cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),))
        targets = cr.dictfetchall()
        model_attachments = {}
        for target_dict in targets:
            if not target_dict['res_model']:
                continue
            # model_attachments = { 'model': { 'res_id': set([attach_id1, attach_id2]) } }
            model_attachments.setdefault(target_dict['res_model'], {}).setdefault(target_dict['res_id'] or 0, set()).add(target_dict['id'])

        # To avoid multiple queries for each attachment found, checks are
        # performed in batch as much as possible.
        ima = self.pool.get('ir.model.access')
        for model, targets in model_attachments.iteritems():
            if model not in self.pool:
                continue
            if not ima.check(cr, uid, model, 'read', False):
                # remove all corresponding attachment ids
                for attach_id in itertools.chain(*targets.values()):
                    ids.remove(attach_id)
                continue  # skip ir.rule processing, these ones are out already

            # filter ids according to what access rules permit
            target_ids = targets.keys()
            allowed_ids = [0] + self.pool[model].search(cr, uid, [('id', 'in', target_ids)], context=context)
            disallowed_ids = set(target_ids).difference(allowed_ids)
            for res_id in disallowed_ids:
                for attach_id in targets[res_id]:
                    ids.remove(attach_id)

        # sort the result according to the original sort ordering
        result = [id for id in orig_ids if id in ids]
        return len(result) if count else list(result)

    def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'read', context=context)
        return super(ir_attachment, self).read(cr, uid, ids, fields_to_read, context=context, load=load)

    def write(self, cr, uid, ids, vals, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'write', context=context, values=vals)
        # file_size is computed from the payload, never written directly
        if 'file_size' in vals:
            del vals['file_size']
        return super(ir_attachment, self).write(cr, uid, ids, vals, context)

    def copy(self, cr, uid, id, default=None, context=None):
        self.check(cr, uid, [id], 'write', context=context)
        return super(ir_attachment, self).copy(cr, uid, id, default, context)

    def unlink(self, cr, uid, ids, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'unlink', context=context)

        # First delete in the database, *then* in the filesystem if the
        # database allowed it. Helps avoid errors when concurrent transactions
        # are deleting the same file, and some of the transactions are
        # rolled back by PostgreSQL (due to concurrent updates detection).
        to_delete = [a.store_fname
                     for a in self.browse(cr, uid, ids, context=context)
                     if a.store_fname]
        res = super(ir_attachment, self).unlink(cr, uid, ids, context)
        for file_path in to_delete:
            self._file_delete(cr, uid, file_path)
        return res

    def create(self, cr, uid, values, context=None):
        self.check(cr, uid, [], mode='write', context=context, values=values)
        if 'file_size' in values:
            del values['file_size']
        return super(ir_attachment, self).create(cr, uid, values, context)

    def action_get(self, cr, uid, context=None):
        return self.pool.get('ir.actions.act_window').for_xml_id(
            cr, uid, 'base', 'action_attachment', context=context)

    def invalidate_bundle(self, cr, uid, type='%', xmlid=None, context=None):
        assert type in ('%', 'css', 'js'), "Unhandled bundle type"
        xmlid = '%' if xmlid is None else xmlid + '%'
        domain = [('url', '=like', '/web/%s/%s/%%' % (type, xmlid))]
        ids = self.search(cr, uid, domain, context=context)
        if ids:
            self.unlink(cr, uid, ids, context=context)
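
    # e.g. invalidate_bundle(cr, uid, type='css') unlinks every cached asset
    # whose url matches '/web/css/%/%', forcing those bundles to be rebuilt
    # on the next request.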

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: