1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
23 from osv import osv, fields
26 # from psycopg2 import Binary
27 #from tools import config
29 from tools.translate import _
# Root directory of the on-disk document filestore.  Taken from the server
# configuration option 'document_path' when set, otherwise defaults to
# <root_path>/filestore.
# NOTE(review): relies on `os` and `tools` being imported earlier in the
# file (imports are not visible in this chunk) — confirm.
DMS_ROOT_PATH = tools.config.get('document_path', os.path.join(tools.config['root_path'], 'filestore'))
class document_file(osv.osv):
    """Extension of ir.attachment for the document-management system.

    File content is routed through the DMS node abstraction (filestore on
    disk or database storage) instead of being kept directly in the
    'datas' column.
    """
    _inherit = 'ir.attachment'
    # Display records by their stored file name rather than the 'name' field.
    _rec_name = 'datas_fname'
37 def _get_filestore(self, cr):
38 return os.path.join(DMS_ROOT_PATH, cr.dbname)
    def _data_get(self, cr, uid, ids, name, arg, context):
        """Function-field getter for 'datas'.

        Reads the file content through the DMS node abstraction (filestore
        or db storage) and returns it base64-encoded, keyed by record id.

        NOTE(review): several lines of this body appear elided in this
        chunk (the `result` dict initialisation, the loop over the browse
        records binding `fbro`, the bin_size branch and the final return) —
        confirm against the full file.
        """
        fbrl = self.browse(cr, uid, ids, context=context)
        nctx = nodes.get_node_context(cr, uid, context={})
        # nctx will /not/ inherit the caller's context. Most of
        # it would be useless, anyway (like active_id, active_model,
        # bin_size: when set, callers only want the data length, not the data.
        bin_size = context.get('bin_size', False)
        fnode = nodes.node_file(None, None, nctx, fbro)
        data = fnode.get_data(cr, fbro)
        result[fbro.id] = base64.encodestring(data or '')
        # bin_size case: store only the content length for this record.
        result[fbro.id] = fnode.get_data_len(cr, fbro)
    # This code can be improved
    def _data_set(self, cr, uid, id, name, value, arg, context):
        """Function-field setter for 'datas'.

        Decodes the base64 payload and writes it through the file node,
        which dispatches to the configured storage (filestore or db).

        NOTE(review): the return statement (and possibly an empty-value
        guard) appears elided in this chunk — confirm against the full file.
        """
        fbro = self.browse(cr, uid, id, context=context)
        # Fresh node context: do not inherit the caller's context.
        nctx = nodes.get_node_context(cr, uid, context={})
        fnode = nodes.node_file(None, None, nctx, fbro)
        res = fnode.set_data(cr, base64.decodestring(value), fbro)
        # NOTE(review): the `_columns = {` opener and its closing brace are
        # not visible in this chunk; the field definitions below belong to it.
        # Columns from ir.attachment:
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        'res_model': fields.char('Attached Model', size=64, readonly=True),
        'res_id': fields.integer('Attached ID', readonly=True),
        # If ir.attachment contained any data before document is installed, preserve
        # the data, don't drop the column!
        'db_datas': fields.binary('Data', oldname='datas'),
        # Virtual content field: actual bytes live in the storage backend and
        # are fetched/stored via _data_get/_data_set.  nodrop keeps any legacy
        # column in place.
        'datas': fields.function(_data_get, method=True, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
        'user_id': fields.many2one('res.users', 'Owner', select=1),
        # 'group_ids': fields.many2many('res.groups', 'document_group_rel', 'item_id', 'group_id', 'Groups'),
        # the directory id now is mandatory. It can still be computed automatically.
        'parent_id': fields.many2one('document.directory', 'Directory', select=1, required=True),
        'index_content': fields.text('Indexed Content'),
        'partner_id':fields.many2one('res.partner', 'Partner', select=1),
        'company_id': fields.many2one('res.company', 'Company'),
        # Maintained via create()/write(); see those methods for how it is
        # kept out of ordinary ORM writes.
        'file_size': fields.integer('File Size', required=True),
        'file_type': fields.char('Content Type', size=128),
        # fields used for file storage
        'store_fname': fields.char('Stored Filename', size=200),
    # Newest attachments first by default.
    _order = "create_date desc"
100 def __get_def_directory(self, cr, uid, context=None):
101 dirobj = self.pool.get('document.directory')
102 return dirobj._get_root_directory(cr, uid, context)
        # NOTE(review): the `_defaults = {` opener is not visible in this
        # chunk; the entries below belong to it.
        'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c),
        'user_id': lambda self, cr, uid, ctx:uid,
        'file_size': lambda self, cr, uid, ctx:0,
        # Fall back to the DMS root directory when no directory is given.
        'parent_id': __get_def_directory
        # NOTE(review): the `_sql_constraints = [` opener is not visible in
        # this chunk; the tuple below belongs to it.  It enforces name
        # uniqueness within a directory/attached record.
        ('filename_uniq', 'unique (name,parent_id,res_id,res_model)', 'The file name must be unique !')
    # NOTE(review): `ids=[]` is a mutable default argument — harmless here
    # because it is never mutated, but `ids=None` would be safer.
    def _check_duplication(self, cr, uid, vals, ids=[], op='create'):
        """Python-level duplicate check mirroring the SQL unique constraint.

        Verifies that no other attachment exists with the same
        (name, parent_id, res_model, res_id) combination.  For 'write',
        missing values are filled in from each record being written.

        NOTE(review): several lines appear elided in this chunk (the
        op=='create'/'write' guards, the `if not name:` style fallbacks
        inside the loop, and the return statements) — confirm against the
        full file.
        """
        name = vals.get('name', False)
        parent_id = vals.get('parent_id', False)
        res_model = vals.get('res_model', False)
        res_id = vals.get('res_id', 0)
        for file in self.browse(cr, uid, ids): # FIXME fields_only
            # Fall back to the record's current values when not in vals.
            parent_id = file.parent_id and file.parent_id.id or False
            res_model = file.res_model and file.res_model or False
            res_id = file.res_id and file.res_id or 0
            # Any other record with the same key is a duplicate.
            res = self.search(cr, uid, [('id', '<>', file.id), ('name', '=', name), ('parent_id', '=', parent_id), ('res_model', '=', res_model), ('res_id', '=', res_id)])
        # 'create' case: any existing record with the same key is a duplicate.
        res = self.search(cr, uid, [('name', '=', name), ('parent_id', '=', parent_id), ('res_id', '=', res_id), ('res_model', '=', res_model)])
137 def copy(self, cr, uid, id, default=None, context=None):
140 if 'name' not in default:
141 name = self.read(cr, uid, [id])[0]['name']
142 default.update({'name': name + " (copy)"})
143 return super(document_file, self).copy(cr, uid, id, default, context)
    def write(self, cr, uid, ids, vals, context=None):
        """Override write to keep the storage nodes consistent.

        Renames and directory moves are routed through the node layer
        (which may relocate the physical file) before the db record is
        updated.  'file_size' is stripped from vals: it is maintained by
        direct SQL elsewhere.

        NOTE(review): several lines appear elided in this chunk (the
        `ids = [ids]` normalisation, the empty-result early return, the
        else-branch initialising `dbro`/`dnode`, the `vals2` construction
        before it is used, the ids re-collection after the loop, and the
        final return) — confirm against the full file.
        """
        if not isinstance(ids, list):
        res = self.search(cr, uid, [('id', 'in', ids)])
        # Enforce the uniqueness rule at the Python level as well.
        if not self._check_duplication(cr, uid, vals, ids, 'write'):
            raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
        # if nodes call this write(), they must skip the code below
        from_node = context and context.get('__from_node', False)
        if (('parent_id' in vals) or ('name' in vals)) and not from_node:
            # perhaps this file is renaming or changing directory
            nctx = nodes.get_node_context(cr,uid,context={})
            dirobj = self.pool.get('document.directory')
            if 'parent_id' in vals:
                # Resolve the target directory node for the move.
                dbro = dirobj.browse(cr, uid, vals['parent_id'], context=context)
                dnode = nctx.get_dir_node(cr, dbro)
            for fbro in self.browse(cr, uid, ids, context=context):
                # Skip records that are not actually renamed or moved.
                if ('parent_id' not in vals or fbro.parent_id.id == vals['parent_id']) \
                    and ('name' not in vals or fbro.name == vals['name']) :
                fnode = nctx.get_file_node(cr, fbro)
                # Let the storage layer perform the physical move/rename.
                res = fnode.move_to(cr, dnode or fnode.parent, vals.get('name', fbro.name), fbro, dbro, True)
                if isinstance(res, dict):
                    # The node returned extra values to write on the record.
                    wid = res.get('id', fbro.id)
                    result = super(document_file,self).write(cr,uid,wid,vals2,context=context)
                    # TODO: how to handle/merge several results?
        if 'file_size' in vals: # only write that field using direct SQL calls
            del vals['file_size']
        if len(ids) and len(vals):
            result = super(document_file,self).write(cr, uid, ids, vals, context=context)
    def create(self, cr, uid, vals, context=None):
        """Override create to fill in DMS-specific defaults.

        Resolves the parent directory (context or root), propagates
        default res_id/res_model from the context, derives partner_id from
        the attached record, computes file_size from the payload, and
        enforces name uniqueness.

        NOTE(review): several lines appear elided in this chunk (the
        `context = {}` guard, the `else:` branches around the link/datas
        and file_size handling, and the final return) — confirm against
        the full file.
        """
        vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
        if not vals['parent_id']:
            # No directory given: file goes to the DMS root directory.
            vals['parent_id'] = self.pool.get('document.directory')._get_root_directory(cr,uid, context)
        if not vals.get('res_id', False) and context.get('default_res_id', False):
            vals['res_id'] = context.get('default_res_id', False)
        if not vals.get('res_model', False) and context.get('default_res_model', False):
            vals['res_model'] = context.get('default_res_model', False)
        if vals.get('res_id', False) and vals.get('res_model', False) \
                and not vals.get('partner_id', False):
            # Try to link the attachment to the partner behind the record.
            vals['partner_id'] = self.__get_partner_id(cr, uid, \
                vals['res_model'], vals['res_id'], context)
        if vals.get('link', False) :
            # Remote content: fetch it from the URL and encode it.
            datas = base64.encodestring(urllib.urlopen(vals['link']).read())
        # NOTE(review): in the full file this assignment is the `else:`
        # branch of the link check above — confirm.
        datas = vals.get('datas', False)
        # Size of the (base64) payload; kept in sync with the content.
        vals['file_size'] = len(datas)
        # NOTE(review): in the full file this deletion is the no-datas
        # branch (stale size must not be written) — confirm.
        if vals.get('file_size'):
            del vals['file_size']
        if not self._check_duplication(cr, uid, vals):
            raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
        result = super(document_file, self).create(cr, uid, vals, context)
226 def __get_partner_id(self, cr, uid, res_model, res_id, context):
227 """ A helper to retrieve the associated partner from any res_model+id
228 It is a hack that will try to discover if the mentioned record is
229 clearly associated with a partner record.
231 obj_model = self.pool.get(res_model)
232 if obj_model._name == 'res.partner':
234 elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
235 bro = obj_model.browse(cr, uid, res_id, context=context)
236 return bro.partner_id.id
237 elif 'address_id' in obj_model._columns and obj_model._columns['address_id']._obj == 'res.partner.address':
238 bro = obj_model.browse(cr, uid, res_id, context=context)
239 return bro.address_id.partner_id.id
    def unlink(self, cr, uid, ids, context=None):
        """Override unlink to also remove the physical stored files.

        NOTE(review): several lines appear elided in this chunk (the
        `unres` list initialisation, the walk up the directory chain that
        binds `par` and locates a storage, appending `r` to the pending
        list, and the final return) — confirm against the full file.
        """
        stor = self.pool.get('document.storage')
        # We have to do the unlink in 2 stages: prepare a list of actual
        # files to be unlinked, update the db (safer to do first, can be
        # rolled back) and then unlink the files. The list wouldn't exist
        # after we discard the objects
        for f in self.browse(cr, uid, ids, context):
            # TODO: update the node cache
            # Storage is found by walking up the directory chain (elided).
            storage_id = par.storage_id
            # Every file must ultimately belong to some storage.
            assert storage_id, "Strange, found file #%s w/o storage!" % f.id
            r = stor.prepare_unlink(cr, uid, storage_id, f)
        # Remove the db records first (rollback-safe), then the files.
        res = super(document_file, self).unlink(cr, uid, ids, context)
        stor.do_unlink(cr, uid, unres)