1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
import hashlib
import itertools
import logging
import os
import re

from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import fields,osv
32 _logger = logging.getLogger(__name__)
class ir_attachment(osv.osv):
    """Attachments are used to link binary files or url to any openerp document.

    External attachment storage
    ---------------------------

    The 'data' function field (_data_get,data_set) is implemented using
    _file_read, _file_write and _file_delete which can be overridden to
    implement other storage engines, such methods should check for other
    location pseudo uri (example: hdfs://hadoopserver)

    The default implementation is the file:dirname location that stores files
    on the local filesystem using name based on their sha1 hash
    """
48 def _name_get_resname(self, cr, uid, ids, object, method, context):
50 for attachment in self.browse(cr, uid, ids, context=context):
51 model_object = attachment.res_model
52 res_id = attachment.res_id
53 if model_object and res_id:
54 model_pool = self.pool.get(model_object)
55 res = model_pool.name_get(cr,uid,[res_id],context)
56 res_name = res and res[0][1] or False
58 field = self._columns.get('res_name',False)
59 if field and len(res_name) > field.size:
60 res_name = res_name[:field.size-3] + '...'
61 data[attachment.id] = res_name
63 data[attachment.id] = False
66 # 'data' field implementation
67 def _full_path(self, cr, uid, location, path):
68 # location = 'file:filestore'
69 assert location.startswith('file:'), "Unhandled filestore location %s" % location
70 location = location[5:]
72 # sanitize location name and path
73 location = re.sub('[.]','',location)
74 location = location.strip('/\\')
76 path = re.sub('[.]','',path)
77 path = path.strip('/\\')
78 return os.path.join(tools.config['root_path'], location, cr.dbname, path)
80 def _file_read(self, cr, uid, location, fname, bin_size=False):
81 full_path = self._full_path(cr, uid, location, fname)
85 r = os.path.getsize(full_path)
87 r = open(full_path,'rb').read().encode('base64')
89 _logger.error("_read_file reading %s",full_path)
92 def _file_write(self, cr, uid, location, value):
93 bin_value = value.decode('base64')
94 fname = hashlib.sha1(bin_value).hexdigest()
95 # scatter files across 1024 dirs
96 # we use '/' in the db (even on windows)
97 fname = fname[:3] + '/' + fname
98 full_path = self._full_path(cr, uid, location, fname)
100 dirname = os.path.dirname(full_path)
101 if not os.path.isdir(dirname):
103 open(full_path,'wb').write(bin_value)
105 _logger.error("_file_write writing %s",full_path)
108 def _file_delete(self, cr, uid, location, fname):
109 count = self.search(cr, 1, [('store_fname','=',fname)], count=True)
111 full_path = self._full_path(cr, uid, location, fname)
115 _logger.error("_file_delete could not unlink %s",full_path)
117 # Harmless and needed for race conditions
118 _logger.error("_file_delete could not unlink %s",full_path)
120 def _data_get(self, cr, uid, ids, name, arg, context=None):
124 location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
125 bin_size = context.get('bin_size')
126 for attach in self.browse(cr, uid, ids, context=context):
127 if location and attach.store_fname:
128 result[attach.id] = self._file_read(cr, uid, location, attach.store_fname, bin_size)
130 result[attach.id] = attach.db_datas
133 def _data_set(self, cr, uid, id, name, value, arg, context=None):
134 # We dont handle setting data to null
139 location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
140 file_size = len(value.decode('base64'))
142 attach = self.browse(cr, uid, id, context=context)
143 if attach.store_fname:
144 self._file_delete(cr, uid, location, attach.store_fname)
145 fname = self._file_write(cr, uid, location, value)
146 # SUPERUSER_ID as probably don't have write access, trigger during create
147 super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'store_fname': fname, 'file_size': file_size}, context=context)
149 super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'db_datas': value, 'file_size': file_size}, context=context)
    _name = 'ir.attachment'

    _columns = {
        'name': fields.char('Attachment Name',size=256, required=True),
        'datas_fname': fields.char('File Name',size=256),
        'description': fields.text('Description'),
        'res_name': fields.function(_name_get_resname, type='char', size=128, string='Resource Name', store=True),
        'res_model': fields.char('Resource Model',size=64, readonly=True, help="The database object this attachment will be attached to"),
        'res_id': fields.integer('Resource ID', readonly=True, help="The record id this is attached to"),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Owner', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', change_default=True),
        'type': fields.selection( [ ('url','URL'), ('binary','Binary'), ],
                'Type', help="Binary File or URL", required=True, change_default=True),
        'url': fields.char('Url', size=1024),
        # field names kept for backward compatibility with the document module
        'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
        'store_fname': fields.char('Stored Filename', size=256),
        'db_datas': fields.binary('Database Data'),
        'file_size': fields.integer('File Size'),
    }

    _defaults = {
        # NOTE(review): some default entries appear elided from this view
        # (likely 'type'/'file_size') -- confirm against the original file
        'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c),
    }
179 def _auto_init(self, cr, context=None):
180 super(ir_attachment, self)._auto_init(cr, context)
181 cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
182 if not cr.fetchone():
183 cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
186 def check(self, cr, uid, ids, mode, context=None, values=None):
187 """Restricts the access to an ir.attachment, according to referred model
188 In the 'document' module, it is overriden to relax this hard rule, since
189 more complex ones apply there.
193 if isinstance(ids, (int, long)):
195 cr.execute('SELECT DISTINCT res_model, res_id FROM ir_attachment WHERE id = ANY (%s)', (ids,))
196 for rmod, rid in cr.fetchall():
197 if not (rmod and rid):
199 res_ids.setdefault(rmod,set()).add(rid)
201 if values.get('res_model') and values.get('res_id'):
202 res_ids.setdefault(values['res_model'],set()).add(values['res_id'])
204 ima = self.pool.get('ir.model.access')
205 for model, mids in res_ids.items():
206 # ignore attachments that are not attached to a resource anymore when checking access rights
207 # (resource was deleted but attachment was not)
208 mids = self.pool.get(model).exists(cr, uid, mids)
209 ima.check(cr, uid, model, mode)
210 self.pool.get(model).check_access_rule(cr, uid, mids, mode, context=context)
212 def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
213 ids = super(ir_attachment, self)._search(cr, uid, args, offset=offset,
214 limit=limit, order=order,
215 context=context, count=False,
216 access_rights_uid=access_rights_uid)
222 # Work with a set, as list.remove() is prohibitive for large lists of documents
223 # (takes 20+ seconds on a db with 100k docs during search_count()!)
227 # For attachments, the permissions of the document they are attached to
228 # apply, so we must remove attachments for which the user cannot access
229 # the linked document.
230 # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
231 # and the permissions are checked in super() and below anyway.
232 cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),))
233 targets = cr.dictfetchall()
234 model_attachments = {}
235 for target_dict in targets:
236 if not (target_dict['res_id'] and target_dict['res_model']):
238 # model_attachments = { 'model': { 'res_id': [id1,id2] } }
239 model_attachments.setdefault(target_dict['res_model'],{}).setdefault(target_dict['res_id'],set()).add(target_dict['id'])
241 # To avoid multiple queries for each attachment found, checks are
242 # performed in batch as much as possible.
243 ima = self.pool.get('ir.model.access')
244 for model, targets in model_attachments.iteritems():
245 if not ima.check(cr, uid, model, 'read', False):
246 # remove all corresponding attachment ids
247 for attach_id in itertools.chain(*targets.values()):
248 ids.remove(attach_id)
249 continue # skip ir.rule processing, these ones are out already
251 # filter ids according to what access rules permit
252 target_ids = targets.keys()
253 allowed_ids = self.pool.get(model).search(cr, uid, [('id', 'in', target_ids)], context=context)
254 disallowed_ids = set(target_ids).difference(allowed_ids)
255 for res_id in disallowed_ids:
256 for attach_id in targets[res_id]:
257 ids.remove(attach_id)
259 # sort result according to the original sort ordering
260 result = [id for id in orig_ids if id in ids]
261 return len(result) if count else list(result)
263 def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'):
264 if isinstance(ids, (int, long)):
266 self.check(cr, uid, ids, 'read', context=context)
267 return super(ir_attachment, self).read(cr, uid, ids, fields_to_read, context, load)
269 def write(self, cr, uid, ids, vals, context=None):
270 if isinstance(ids, (int, long)):
272 self.check(cr, uid, ids, 'write', context=context, values=vals)
273 if 'file_size' in vals:
274 del vals['file_size']
275 return super(ir_attachment, self).write(cr, uid, ids, vals, context)
277 def copy(self, cr, uid, id, default=None, context=None):
278 self.check(cr, uid, [id], 'write', context=context)
279 return super(ir_attachment, self).copy(cr, uid, id, default, context)
281 def unlink(self, cr, uid, ids, context=None):
282 if isinstance(ids, (int, long)):
284 self.check(cr, uid, ids, 'unlink', context=context)
285 location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
287 for attach in self.browse(cr, uid, ids, context=context):
288 if attach.store_fname:
289 self._file_delete(cr, uid, location, attach.store_fname)
290 return super(ir_attachment, self).unlink(cr, uid, ids, context)
292 def create(self, cr, uid, values, context=None):
293 self.check(cr, uid, [], mode='write', context=context, values=values)
294 if 'file_size' in values:
295 del values['file_size']
296 return super(ir_attachment, self).create(cr, uid, values, context)
298 def action_get(self, cr, uid, context=None):
299 return self.pool.get('ir.actions.act_window').for_xml_id(
300 cr, uid, 'base', 'action_attachment', context=context)
302 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: