1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
import hashlib
import itertools
import logging
import os
import re

from openerp import tools
from openerp.osv import fields,osv
31 _logger = logging.getLogger(__name__)
class ir_attachment(osv.osv):
    """Attachments are used to link binary files or url to any openerp document.

    External attachment storage
    ---------------------------

    The 'data' function field (_data_get,data_set) is implemented using
    _file_read, _file_write and _file_delete which can be overridden to
    implement other storage engines, such methods should check for other
    location pseudo uri (example: hdfs://hadoopserver)

    The default implementation is the file:dirname location that stores files
    on the local filesystem using name based on their sha1 hash
    """
47 def _name_get_resname(self, cr, uid, ids, object, method, context):
49 for attachment in self.browse(cr, uid, ids, context=context):
50 model_object = attachment.res_model
51 res_id = attachment.res_id
52 if model_object and res_id:
53 model_pool = self.pool.get(model_object)
54 res = model_pool.name_get(cr,uid,[res_id],context)
55 res_name = res and res[0][1] or False
57 field = self._columns.get('res_name',False)
58 if field and len(res_name) > field.size:
59 res_name = res_name[:field.size-3] + '...'
60 data[attachment.id] = res_name
62 data[attachment.id] = False
65 # 'data' field implementation
66 def _full_path(self, cr, uid, location, path):
67 # location = 'file:filestore'
68 assert location.startswith('file:'), "Unhandled filestore location %s" % location
69 location = location[5:]
71 # sanitize location name and path
72 location = re.sub('[.]','',location)
73 location = location.strip('/\\')
75 path = re.sub('[.]','',path)
76 path = path.strip('/\\')
77 return os.path.join(tools.config['root_path'], location, cr.dbname, path)
79 def _file_read(self, cr, uid, location, fname, bin_size=False):
80 full_path = self._full_path(cr, uid, location, fname)
84 r = os.path.getsize(full_path)
86 r = open(full_path).read().encode('base64')
88 _logger.error("_read_file reading %s",full_path)
91 def _file_write(self, cr, uid, location, value):
92 bin_value = value.decode('base64')
93 fname = hashlib.sha1(bin_value).hexdigest()
94 # scatter files across 1024 dirs
95 # we use '/' in the db (even on windows)
96 fname = fname[:3] + '/' + fname
97 full_path = self._full_path(cr, uid, location, fname)
99 dirname = os.path.dirname(full_path)
100 if not os.path.isdir(dirname):
102 open(full_path,'wb').write(bin_value)
104 _logger.error("_file_write writing %s",full_path)
107 def _file_delete(self, cr, uid, location, fname):
108 count = self.search(cr, 1, [('store_fname','=',fname)], count=True)
110 full_path = self._full_path(cr, uid, location, fname)
114 _logger.error("_file_delete could not unlink %s",full_path)
116 # Harmless and needed for race conditions
117 _logger.error("_file_delete could not unlink %s",full_path)
119 def _data_get(self, cr, uid, ids, name, arg, context=None):
123 location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
124 bin_size = context.get('bin_size')
125 for attach in self.browse(cr, uid, ids, context=context):
126 if location and attach.store_fname:
127 result[attach.id] = self._file_read(cr, uid, location, attach.store_fname, bin_size)
129 result[attach.id] = attach.db_datas
132 def _data_set(self, cr, uid, id, name, value, arg, context=None):
133 # We dont handle setting data to null
138 location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
139 file_size = len(value.decode('base64'))
141 attach = self.browse(cr, uid, id, context=context)
142 if attach.store_fname:
143 self._file_delete(cr, uid, location, attach.store_fname)
144 fname = self._file_write(cr, uid, location, value)
145 super(ir_attachment, self).write(cr, uid, [id], {'store_fname': fname, 'file_size': file_size}, context=context)
147 super(ir_attachment, self).write(cr, uid, [id], {'db_datas': value, 'file_size': file_size}, context=context)
150 _name = 'ir.attachment'
152 'name': fields.char('Attachment Name',size=256, required=True),
153 'datas_fname': fields.char('File Name',size=256),
154 'description': fields.text('Description'),
155 'res_name': fields.function(_name_get_resname, type='char', size=128, string='Resource Name', store=True),
156 'res_model': fields.char('Resource Model',size=64, readonly=True, help="The database object this attachment will be attached to"),
157 'res_id': fields.integer('Resource ID', readonly=True, help="The record id this is attached to"),
158 'create_date': fields.datetime('Date Created', readonly=True),
159 'create_uid': fields.many2one('res.users', 'Owner', readonly=True),
160 'company_id': fields.many2one('res.company', 'Company', change_default=True),
161 'type': fields.selection( [ ('url','URL'), ('binary','Binary'), ],
162 'Type', help="Binary File or URL", required=True, change_default=True),
163 'url': fields.char('Url', size=1024),
164 # al: We keep shitty field names for backward compatibility with document
165 'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
166 'store_fname': fields.char('Stored Filename', size=256),
167 'db_datas': fields.binary('Database Data'),
168 'file_size': fields.integer('File Size'),
174 'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c),
177 def _auto_init(self, cr, context=None):
178 super(ir_attachment, self)._auto_init(cr, context)
179 cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
180 if not cr.fetchone():
181 cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
184 def check(self, cr, uid, ids, mode, context=None, values=None):
185 """Restricts the access to an ir.attachment, according to referred model
186 In the 'document' module, it is overriden to relax this hard rule, since
187 more complex ones apply there.
193 if isinstance(ids, (int, long)):
195 cr.execute('SELECT DISTINCT res_model, res_id FROM ir_attachment WHERE id = ANY (%s)', (ids,))
196 for rmod, rid in cr.fetchall():
197 if not (rmod and rid):
199 res_ids.setdefault(rmod,set()).add(rid)
201 if 'res_model' in values and 'res_id' in values:
202 res_ids.setdefault(values['res_model'],set()).add(values['res_id'])
204 ima = self.pool.get('ir.model.access')
205 for model, mids in res_ids.items():
206 # ignore attachments that are not attached to a resource anymore when checking access rights
207 # (resource was deleted but attachment was not)
208 mids = self.pool.get(model).exists(cr, uid, mids)
209 ima.check(cr, uid, model, mode)
210 self.pool.get(model).check_access_rule(cr, uid, mids, mode, context=context)
212 def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
213 ids = super(ir_attachment, self)._search(cr, uid, args, offset=offset,
214 limit=limit, order=order,
215 context=context, count=False,
216 access_rights_uid=access_rights_uid)
222 # Work with a set, as list.remove() is prohibitive for large lists of documents
223 # (takes 20+ seconds on a db with 100k docs during search_count()!)
227 # For attachments, the permissions of the document they are attached to
228 # apply, so we must remove attachments for which the user cannot access
229 # the linked document.
230 # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
231 # and the permissions are checked in super() and below anyway.
232 cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),))
233 targets = cr.dictfetchall()
234 model_attachments = {}
235 for target_dict in targets:
236 if not (target_dict['res_id'] and target_dict['res_model']):
238 # model_attachments = { 'model': { 'res_id': [id1,id2] } }
239 model_attachments.setdefault(target_dict['res_model'],{}).setdefault(target_dict['res_id'],set()).add(target_dict['id'])
241 # To avoid multiple queries for each attachment found, checks are
242 # performed in batch as much as possible.
243 ima = self.pool.get('ir.model.access')
244 for model, targets in model_attachments.iteritems():
245 if not ima.check(cr, uid, model, 'read', False):
246 # remove all corresponding attachment ids
247 for attach_id in itertools.chain(*targets.values()):
248 ids.remove(attach_id)
249 continue # skip ir.rule processing, these ones are out already
251 # filter ids according to what access rules permit
252 target_ids = targets.keys()
253 allowed_ids = self.pool.get(model).search(cr, uid, [('id', 'in', target_ids)], context=context)
254 disallowed_ids = set(target_ids).difference(allowed_ids)
255 for res_id in disallowed_ids:
256 for attach_id in targets[res_id]:
257 ids.remove(attach_id)
259 # sort result according to the original sort ordering
260 result = [id for id in orig_ids if id in ids]
261 return len(result) if count else list(result)
263 def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'):
264 self.check(cr, uid, ids, 'read', context=context)
265 return super(ir_attachment, self).read(cr, uid, ids, fields_to_read, context, load)
267 def write(self, cr, uid, ids, vals, context=None):
268 self.check(cr, uid, ids, 'write', context=context, values=vals)
269 if 'file_size' in vals:
270 del vals['file_size']
271 return super(ir_attachment, self).write(cr, uid, ids, vals, context)
273 def copy(self, cr, uid, id, default=None, context=None):
274 self.check(cr, uid, [id], 'write', context=context)
275 return super(ir_attachment, self).copy(cr, uid, id, default, context)
277 def unlink(self, cr, uid, ids, context=None):
278 self.check(cr, uid, ids, 'unlink', context=context)
279 location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
281 for attach in self.browse(cr, uid, ids, context=context):
282 if attach.store_fname:
283 self._file_delete(cr, uid, location, attach.store_fname)
284 return super(ir_attachment, self).unlink(cr, uid, ids, context)
286 def create(self, cr, uid, values, context=None):
287 self.check(cr, uid, [], mode='create', context=context, values=values)
288 if 'file_size' in values:
289 del values['file_size']
290 return super(ir_attachment, self).create(cr, uid, values, context)
292 def action_get(self, cr, uid, context=None):
293 return self.pool.get('ir.actions.act_window').for_xml_id(
294 cr, uid, 'base', 'action_attachment', context=context)
296 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: