1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
import hashlib
import itertools
import logging
import os
import re

from openerp import tools
from openerp.osv import fields,osv
from openerp import SUPERUSER_ID
32 _logger = logging.getLogger(__name__)
class ir_attachment(osv.osv):
    """Attachments are used to link binary files or url to any openerp document.

    External attachment storage
    ---------------------------

    The 'data' function field (_data_get,_data_set) is implemented using
    _file_read, _file_write and _file_delete which can be overridden to
    implement other storage engines, such methods should check for other
    location pseudo uri (example: hdfs://hadoopserver)

    The default implementation is the file:dirname location that stores files
    on the local filesystem using name based on their sha1 hash
    """

    def _name_get_resname(self, cr, uid, ids, object, method, context):
        """Function-field getter for 'res_name': the display name of the
        record this attachment is linked to, truncated to the column size."""
        data = {}
        for attachment in self.browse(cr, uid, ids, context=context):
            model_object = attachment.res_model
            res_id = attachment.res_id
            if model_object and res_id:
                model_pool = self.pool.get(model_object)
                res = model_pool.name_get(cr, uid, [res_id], context)
                res_name = res and res[0][1] or False
                if res_name:
                    # Ellipsize so the value fits in the 'res_name' column.
                    field = self._columns.get('res_name', False)
                    if field and len(res_name) > field.size:
                        res_name = res_name[:field.size - 3] + '...'
                data[attachment.id] = res_name
            else:
                data[attachment.id] = False
        return data

    # 'data' field implementation
    def _full_path(self, cr, uid, location, path):
        """Map a store filename to an absolute path under the filestore.

        ``location`` must be a ``file:dirname`` pseudo-uri; the directory name
        and relative path are sanitized to prevent escaping the filestore
        (dots stripped, leading/trailing slashes removed).
        """
        # location = 'file:filestore'
        assert location.startswith('file:'), "Unhandled filestore location %s" % location
        location = location[5:]

        # sanitize location name and path
        location = re.sub('[.]', '', location)
        location = location.strip('/\\')

        path = re.sub('[.]', '', path)
        path = path.strip('/\\')
        return os.path.join(tools.config['root_path'], location, cr.dbname, path)

    def _file_read(self, cr, uid, location, fname, bin_size=False):
        """Read a stored file back: its size if ``bin_size`` is set, otherwise
        its base64-encoded content. I/O errors are logged, not raised, and an
        empty string is returned instead."""
        full_path = self._full_path(cr, uid, location, fname)
        r = ''
        try:
            if bin_size:
                r = os.path.getsize(full_path)
            else:
                r = open(full_path, 'rb').read().encode('base64')
        except IOError:
            _logger.error("_read_file reading %s", full_path)
        return r

    def _file_write(self, cr, uid, location, value):
        """Write base64 ``value`` to the filestore under its sha1 content
        hash and return the relative store filename (content-addressed, so
        identical data is stored only once)."""
        bin_value = value.decode('base64')
        fname = hashlib.sha1(bin_value).hexdigest()
        # scatter files across 1024 dirs
        # we use '/' in the db (even on windows)
        fname = fname[:3] + '/' + fname
        full_path = self._full_path(cr, uid, location, fname)
        try:
            dirname = os.path.dirname(full_path)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            open(full_path, 'wb').write(bin_value)
        except IOError:
            _logger.error("_file_write writing %s", full_path)
        return fname

    def _file_delete(self, cr, uid, location, fname):
        """Unlink the stored file, but only when no other attachment still
        references it (files are content-addressed and may be shared)."""
        # count as superuser: other users' attachments must keep the file alive
        count = self.search(cr, 1, [('store_fname', '=', fname)], count=True)
        if count <= 1:
            full_path = self._full_path(cr, uid, location, fname)
            try:
                os.unlink(full_path)
            except OSError:
                _logger.error("_file_delete could not unlink %s", full_path)
            except IOError:
                # Harmless and needed for race conditions
                _logger.error("_file_delete could not unlink %s", full_path)

    def _data_get(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter for 'datas': read from the external
        filestore when configured, else from the db_datas column."""
        if context is None:
            context = {}
        result = {}
        location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
        bin_size = context.get('bin_size')
        for attach in self.browse(cr, uid, ids, context=context):
            if location and attach.store_fname:
                result[attach.id] = self._file_read(cr, uid, location, attach.store_fname, bin_size)
            else:
                result[attach.id] = attach.db_datas
        return result

    def _data_set(self, cr, uid, id, name, value, arg, context=None):
        """Function-field setter for 'datas': store on the filesystem when an
        external location is configured (replacing any previous file), else in
        the database. Writes go through SUPERUSER_ID because permissions were
        already checked by write()."""
        # We dont handle setting data to null
        if not value:
            return True
        if context is None:
            context = {}
        location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
        file_size = len(value.decode('base64'))
        if location:
            attach = self.browse(cr, uid, id, context=context)
            if attach.store_fname:
                self._file_delete(cr, uid, location, attach.store_fname)
            fname = self._file_write(cr, uid, location, value)
            super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'store_fname': fname, 'file_size': file_size}, context=context)
        else:
            super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'db_datas': value, 'file_size': file_size}, context=context)
        return True

    _name = 'ir.attachment'
    _columns = {
        'name': fields.char('Attachment Name', size=256, required=True),
        'datas_fname': fields.char('File Name', size=256),
        'description': fields.text('Description'),
        'res_name': fields.function(_name_get_resname, type='char', size=128, string='Resource Name', store=True),
        'res_model': fields.char('Resource Model', size=64, readonly=True, help="The database object this attachment will be attached to"),
        'res_id': fields.integer('Resource ID', readonly=True, help="The record id this is attached to"),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Owner', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', change_default=True),
        'type': fields.selection( [ ('url','URL'), ('binary','Binary'), ],
                'Type', help="Binary File or URL", required=True, change_default=True),
        'url': fields.char('Url', size=1024),
        # al: We keep shitty field names for backward compatibility with document
        'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
        'store_fname': fields.char('Stored Filename', size=256),
        'db_datas': fields.binary('Database Data'),
        'file_size': fields.integer('File Size'),
    }

    _defaults = {
        'type': 'binary',
        'file_size': 0,
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c),
    }

    def _auto_init(self, cr, context=None):
        """Create the (res_model, res_id) index used by check()/_search()."""
        super(ir_attachment, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
            cr.commit()

    def check(self, cr, uid, ids, mode, context=None, values=None):
        """Restricts the access to an ir.attachment, according to referred model
        In the 'document' module, it is overriden to relax this hard rule, since
        more complex ones apply there.
        """
        res_ids = {}
        if ids:
            if isinstance(ids, (int, long)):
                ids = [ids]
            cr.execute('SELECT DISTINCT res_model, res_id FROM ir_attachment WHERE id = ANY (%s)', (ids,))
            for rmod, rid in cr.fetchall():
                if not (rmod and rid):
                    continue
                res_ids.setdefault(rmod, set()).add(rid)
        if values:
            # creation/update: also check the target record given in vals
            if values.get('res_model') and 'res_id' in values:
                res_ids.setdefault(values['res_model'], set()).add(values['res_id'])

        ima = self.pool.get('ir.model.access')
        for model, mids in res_ids.items():
            # ignore attachments that are not attached to a resource anymore when checking access rights
            # (resource was deleted but attachment was not)
            mids = self.pool.get(model).exists(cr, uid, mids)
            ima.check(cr, uid, model, mode)
            self.pool.get(model).check_access_rule(cr, uid, mids, mode, context=context)

    def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        """Filter out attachments whose linked document the user may not read."""
        ids = super(ir_attachment, self)._search(cr, uid, args, offset=offset,
                                                 limit=limit, order=order,
                                                 context=context, count=False,
                                                 access_rights_uid=access_rights_uid)

        if not ids:
            if count:
                return 0
            return []

        # Work with a set, as list.remove() is prohibitive for large lists of documents
        # (takes 20+ seconds on a db with 100k docs during search_count()!)
        orig_ids = ids
        ids = set(ids)

        # For attachments, the permissions of the document they are attached to
        # apply, so we must remove attachments for which the user cannot access
        # the linked document.
        # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
        # and the permissions are checked in super() and below anyway.
        cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),))
        targets = cr.dictfetchall()
        model_attachments = {}
        for target_dict in targets:
            if not (target_dict['res_id'] and target_dict['res_model']):
                continue
            # model_attachments = { 'model': { 'res_id': [id1,id2] } }
            model_attachments.setdefault(target_dict['res_model'], {}).setdefault(target_dict['res_id'], set()).add(target_dict['id'])

        # To avoid multiple queries for each attachment found, checks are
        # performed in batch as much as possible.
        ima = self.pool.get('ir.model.access')
        for model, targets in model_attachments.iteritems():
            if not ima.check(cr, uid, model, 'read', False):
                # remove all corresponding attachment ids
                for attach_id in itertools.chain(*targets.values()):
                    ids.remove(attach_id)
                continue # skip ir.rule processing, these ones are out already

            # filter ids according to what access rules permit
            target_ids = targets.keys()
            allowed_ids = self.pool.get(model).search(cr, uid, [('id', 'in', target_ids)], context=context)
            disallowed_ids = set(target_ids).difference(allowed_ids)
            for res_id in disallowed_ids:
                for attach_id in targets[res_id]:
                    ids.remove(attach_id)

        # sort result according to the original sort ordering
        result = [id for id in orig_ids if id in ids]
        return len(result) if count else list(result)

    def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'read', context=context)
        return super(ir_attachment, self).read(cr, uid, ids, fields_to_read, context, load)

    def write(self, cr, uid, ids, vals, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'write', context=context, values=vals)
        # file_size is computed by _data_set; never trust a client value
        if 'file_size' in vals:
            del vals['file_size']
        return super(ir_attachment, self).write(cr, uid, ids, vals, context)

    def copy(self, cr, uid, id, default=None, context=None):
        self.check(cr, uid, [id], 'write', context=context)
        return super(ir_attachment, self).copy(cr, uid, id, default, context)

    def unlink(self, cr, uid, ids, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'unlink', context=context)
        location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
        if location:
            # drop the stored files before removing the db rows
            for attach in self.browse(cr, uid, ids, context=context):
                if attach.store_fname:
                    self._file_delete(cr, uid, location, attach.store_fname)
        return super(ir_attachment, self).unlink(cr, uid, ids, context)

    def create(self, cr, uid, values, context=None):
        self.check(cr, uid, [], mode='write', context=context, values=values)
        # file_size is computed by _data_set; never trust a client value
        if 'file_size' in values:
            del values['file_size']
        return super(ir_attachment, self).create(cr, uid, values, context)

    def action_get(self, cr, uid, context=None):
        return self.pool.get('ir.actions.act_window').for_xml_id(
            cr, uid, 'base', 'action_attachment', context=context)
301 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: