1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
23 from osv import osv, fields
26 # from psycopg2 import Binary
27 #from tools import config
29 from tools.translate import _
# Filesystem root of the document store: the 'document_path' config option,
# defaulting to <root_path>/filestore.
# NOTE(review): relies on 'os' and 'tools' being imported earlier in the file
# (those import lines are not visible in this excerpt) — confirm.
DMS_ROOT_PATH = tools.config.get('document_path', os.path.join(tools.config['root_path'], 'filestore'))
class document_file(osv.osv):
    """Extension of ir.attachment that ties attachments into the document
    management system (directories, storage backends, indexed content)."""
    _inherit = 'ir.attachment'
    # Records are displayed by their stored file name rather than 'name'.
    _rec_name = 'datas_fname'
    def _attach_parent_id(self, cr, uid, ids=None, context=None):
        """Migrate ir.attachments to the document module.

        When the 'document' module is loaded on a db that has had plain attachments,
        they will need to be attached to some parent folder, and be converted from
        base64-in-bytea to raw-in-bytea format.
        This function performs the internal migration, once and forever, for these
        attachments. It cannot be done through the nominal ORM maintenance code,
        because the root folder is only created after the document_data.xml file
        is loaded.
        It also establishes the parent_id NOT NULL constraint that ir.attachment
        should have had (but would have failed if plain attachments contained null
        parent_id values).
        """
        # Root folder every orphan attachment will be re-parented to.
        parent_id = self.pool.get('document.directory')._get_root_directory(cr,uid)
        # NOTE(review): guard conditions are missing from this excerpt — in the
        # full source, the warning below fires only when no root directory was
        # found, and the NotImplementedError only when 'ids' was passed.
        logging.getLogger('document').warning("at _attach_parent_id(), still not able to set the parent!")
        raise NotImplementedError("Ids is just there by convention! Don't use it yet, please.")
        # Re-parent all orphan attachments and convert their payload from
        # base64 text to raw bytes in a single SQL statement.
        cr.execute("UPDATE ir_attachment " \
                   "SET parent_id = %s, db_datas = decode(encode(db_datas,'escape'), 'base64') " \
                   "WHERE parent_id IS NULL", (parent_id,))
        # From now on, every attachment must live in a directory.
        cr.execute("ALTER TABLE ir_attachment ALTER parent_id SET NOT NULL")
68 def _get_filestore(self, cr):
69 return os.path.join(DMS_ROOT_PATH, cr.dbname)
    def _data_get(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter for 'datas': return the file content
        base64-encoded, or only its length when context['bin_size'] is set.

        NOTE(review): the result-dict initialisation, the per-record loop
        header, the bin_size if/else and the final return are in lines
        missing from this excerpt.
        """
        fbrl = self.browse(cr, uid, ids, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        # nctx will /not/ inherit the caller's context. Most of
        # it would be useless, anyway (like active_id, active_model,
        bin_size = context.get('bin_size', False)
        # Wrap each attachment in a node to reach its storage backend.
        fnode = nodes.node_file(None, None, nctx, fbro)
        data = fnode.get_data(cr, fbro)
        result[fbro.id] = base64.encodestring(data or '')
        # When only the size is wanted, skip reading/encoding the payload.
        result[fbro.id] = fnode.get_data_len(cr, fbro)
        # This code can be improved
    def _data_set(self, cr, uid, id, name, value, arg, context=None):
        """Function-field setter for 'datas': decode the base64 value and
        hand the raw bytes to the storage node of this attachment.

        NOTE(review): a guard (presumably for an empty 'value') and the
        final return are in lines missing from this excerpt — confirm.
        """
        fbro = self.browse(cr, uid, id, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        fnode = nodes.node_file(None, None, nctx, fbro)
        res = fnode.set_data(cr, base64.decodestring(value), fbro)
        # Columns from ir.attachment:
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        'res_model': fields.char('Attached Model', size=64, readonly=True, change_default=True),
        'res_id': fields.integer('Attached ID', readonly=True),

        # If ir.attachment contained any data before document is installed, preserve
        # the data, don't drop the column!
        'db_datas': fields.binary('Data', oldname='datas'),
        # 'datas' is computed/written through the storage layer (see
        # _data_get/_data_set); nodrop keeps the underlying column alive.
        'datas': fields.function(_data_get, method=True, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),

        # Fields of document:
        'user_id': fields.many2one('res.users', 'Owner', select=1),
        # 'group_ids': fields.many2many('res.groups', 'document_group_rel', 'item_id', 'group_id', 'Groups'),
        # the directory id now is mandatory. It can still be computed automatically.
        'parent_id': fields.many2one('document.directory', 'Directory', select=1, required=True, change_default=True),
        # Text excerpt extracted from the file, used for searching.
        'index_content': fields.text('Indexed Content'),
        'partner_id':fields.many2one('res.partner', 'Partner', select=1),
        # file_size is maintained outside the ORM (see write(), which strips
        # it from vals); required at the ORM level nonetheless.
        'file_size': fields.integer('File Size', required=True),
        'file_type': fields.char('Content Type', size=128),

        # fields used for file storage
        'store_fname': fields.char('Stored Filename', size=200),

    # Newest files first by default.
    _order = "create_date desc"
132 def __get_def_directory(self, cr, uid, context=None):
133 dirobj = self.pool.get('document.directory')
134 return dirobj._get_root_directory(cr, uid, context)
        # Defaults: owner is the creating user, size starts at zero, and new
        # files land in the root directory when no parent is supplied.
        'user_id': lambda self, cr, uid, ctx:uid,
        'file_size': lambda self, cr, uid, ctx:0,
        'parent_id': __get_def_directory

    # filename_uniq is not possible in pure SQL
    def _check_duplication(self, cr, uid, vals, ids=[], op='create'):
        """Uniqueness helper: callers raise 'File name must be unique!' when
        this returns a falsy value, i.e. when another attachment with the
        same (name, parent_id, res_model, res_id) already exists.

        NOTE(review): the op=='write' conditionals and the return statements
        are in lines missing from this excerpt; also note the mutable
        default for 'ids' in the original signature.
        """
        name = vals.get('name', False)
        parent_id = vals.get('parent_id', False)
        res_model = vals.get('res_model', False)
        res_id = vals.get('res_id', 0)
        # Write path: fall back to each record's current values for keys
        # that 'vals' does not override.
        for file in self.browse(cr, uid, ids): # FIXME fields_only
            parent_id = file.parent_id and file.parent_id.id or False
            res_model = file.res_model and file.res_model or False
            res_id = file.res_id and file.res_id or 0
            # Exclude the record itself from the duplicate search.
            res = self.search(cr, uid, [('id', '<>', file.id), ('name', '=', name), ('parent_id', '=', parent_id), ('res_model', '=', res_model), ('res_id', '=', res_id)])
        # Create path: any match at all is a duplicate.
        res = self.search(cr, uid, [('name', '=', name), ('parent_id', '=', parent_id), ('res_id', '=', res_id), ('res_model', '=', res_model)])
168 def check(self, cr, uid, ids, mode, context=None, values=None):
169 """Check access wrt. res_model, relax the rule of ir.attachment parent
171 With 'document' installed, everybody will have access to attachments of
172 any resources they can *read*.
174 return super(document_file, self).check(cr, uid, ids, mode='read',
175 context=context, values=values)
    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate an attachment, appending " (copy)" to the name unless
        the caller already supplied a 'name' in 'default'.

        NOTE(review): the usual ``default = {}``-when-None guard is in lines
        missing from this excerpt — confirm before relying on default=None.
        """
        if 'name' not in default:
            name = self.read(cr, uid, [id])[0]['name']
            default.update({'name': name + " (copy)"})
        return super(document_file, self).copy(cr, uid, id, default, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        """Write attachment values, routing renames / directory moves through
        the DMS node layer so the stored files follow the records.

        NOTE(review): several lines are missing from this excerpt (the body
        of the isinstance() guard, the 'vals2' construction, intermediate
        returns and the final return) — the visible flow is partial.
        """
        # Normalise 'ids' to a list (guard body missing from this excerpt).
        if not isinstance(ids, list):
        # Restrict to ids that actually exist / are visible.
        res = self.search(cr, uid, [('id', 'in', ids)])
        if not self._check_duplication(cr, uid, vals, ids, 'write'):
            raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))

        # if nodes call this write(), they must skip the code below
        from_node = context and context.get('__from_node', False)
        if (('parent_id' in vals) or ('name' in vals)) and not from_node:
            # perhaps this file is renaming or changing directory
            nctx = nodes.get_node_context(cr,uid,context={})
            dirobj = self.pool.get('document.directory')
            if 'parent_id' in vals:
                # Resolve the destination directory node once for all records.
                dbro = dirobj.browse(cr, uid, vals['parent_id'], context=context)
                dnode = nctx.get_dir_node(cr, dbro)
            for fbro in self.browse(cr, uid, ids, context=context):
                # Skip records whose name/parent are not actually changing.
                if ('parent_id' not in vals or fbro.parent_id.id == vals['parent_id']) \
                    and ('name' not in vals or fbro.name == vals['name']):
                # Let the node layer perform the move/rename on storage.
                fnode = nctx.get_file_node(cr, fbro)
                res = fnode.move_to(cr, dnode or fnode.parent, vals.get('name', fbro.name), fbro, dbro, True)
                if isinstance(res, dict):
                    # move_to may redirect the write to another record id with
                    # adjusted values ('vals2' built in missing lines).
                    wid = res.get('id', fbro.id)
                    result = super(document_file,self).write(cr,uid,wid,vals2,context=context)
                    # TODO: how to handle/merge several results?

        if 'file_size' in vals: # only write that field using direct SQL calls
            del vals['file_size']
        if len(ids) and len(vals):
            result = super(document_file,self).write(cr, uid, ids, vals, context=context)
    def create(self, cr, uid, vals, context=None):
        """Create an attachment: resolve the parent directory, propagate
        default res_model/res_id from the context, guess the partner, fetch
        linked content, and enforce file-name uniqueness.

        NOTE(review): several lines are missing from this excerpt (the
        context-None guard, the else-branches around 'link'/'datas' and the
        file_size computation, and the final return) — the visible
        straight-line sequence is partial.
        """
        # Context-supplied parent wins over the one in vals; fall back to root.
        vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
        if not vals['parent_id']:
            vals['parent_id'] = self.pool.get('document.directory')._get_root_directory(cr,uid, context)
        if not vals.get('res_id', False) and context.get('default_res_id', False):
            vals['res_id'] = context.get('default_res_id', False)
        if not vals.get('res_model', False) and context.get('default_res_model', False):
            vals['res_model'] = context.get('default_res_model', False)
        # Attach to a partner when one can be inferred from the resource.
        if vals.get('res_id', False) and vals.get('res_model', False) \
                and not vals.get('partner_id', False):
            vals['partner_id'] = self.__get_partner_id(cr, uid, \
                vals['res_model'], vals['res_id'], context)

        # A 'link' attachment fetches its content from the URL (no timeout,
        # blocking — note for reviewers); otherwise content comes from vals.
        if vals.get('link', False) :
            datas = base64.encodestring(urllib.urlopen(vals['link']).read())
        datas = vals.get('datas', False)
        vals['file_size'] = len(datas)
        if vals.get('file_size'):
            del vals['file_size']
        if not self._check_duplication(cr, uid, vals):
            raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
        result = super(document_file, self).create(cr, uid, vals, context)
    def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
        """ A helper to retrieve the associated partner from any res_model+id
        It is a hack that will try to discover if the mentioned record is
        clearly associated with a partner record.
        """
        obj_model = self.pool.get(res_model)
        if obj_model._name == 'res.partner':
            # NOTE(review): this branch's body ('return res_id') is in a line
            # missing from this excerpt.
        # Record has a direct many2one to res.partner.
        elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
            bro = obj_model.browse(cr, uid, res_id, context=context)
            return bro.partner_id.id
        # Record points to an address; follow it to its partner.
        elif 'address_id' in obj_model._columns and obj_model._columns['address_id']._obj == 'res.partner.address':
            bro = obj_model.browse(cr, uid, res_id, context=context)
            return bro.address_id.partner_id.id
    def unlink(self, cr, uid, ids, context=None):
        """Delete attachments and their stored files, in two stages: first
        the (rollback-able) database delete, then the filesystem cleanup.

        NOTE(review): several lines are missing from this excerpt (the
        'unres' list initialisation, the assignment of 'par' from each
        record's parent, the storage_id/else guards and the final return).
        """
        stor = self.pool.get('document.storage')
        # We have to do the unlink in 2 stages: prepare a list of actual
        # files to be unlinked, update the db (safer to do first, can be
        # rolled back) and then unlink the files. The list wouldn't exist
        # after we discard the objects
        ids = self.search(cr, uid, [('id','in',ids)])
        for f in self.browse(cr, uid, ids, context=context):
            # TODO: update the node cache
            storage_id = par.storage_id
            #assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail
            # Stage 1: collect the physical files to remove later.
            r = stor.prepare_unlink(cr, uid, storage_id, f)
            logging.getLogger('document').warning("Unlinking attachment #%s %s that has no storage",
        # Stage 2: database delete, then physical removal of collected files.
        res = super(document_file, self).unlink(cr, uid, ids, context)
        stor.do_unlink(cr, uid, unres)