Given (ir.attachment, context), we modify the file (save, update, rename, etc.).
Given (directory, context), we create a file.
Given (path, context), we create or modify a file.
-
+
Note that in all above cases, we don't explicitly choose the storage media,
but always require a context to be present.
media + directory.
The algorithm says that in any of the above cases, our first goal is to locate
-the node for any combination of search criteria. It would be wise NOT to
+the node for any combination of search criteria. It would be wise NOT to
represent each node in the path (like node[/] + node[/dir1] + node[/dir1/dir2])
but directly jump to the end node (like node[/dir1/dir2]) whenever possible.
mode = mode[:-1]
self.mode = mode
self._size = os.stat(path).st_size
-
+
for attr in ('closed', 'read', 'write', 'seek', 'tell', 'next'):
setattr(self,attr, getattr(self.__file, attr))
    def size(self):
        """Return the cached byte size of the underlying file.

        The size is computed once (via os.stat at open time) and stored
        in self._size; this accessor does not re-stat the file.
        """
        return self._size
-
+
    def __iter__(self):
        """Return self as the iterator (iteration is delegated to the
        wrapped file object's bound 'next', copied onto self at init)."""
        return self
filename = par.path
if isinstance(filename, (tuple, list)):
filename = '/'.join(filename)
-
+
try:
mime, icont = cntIndex.doIndex(None, filename=filename,
content_type=None, realfname=fname)
self._size = 0L
if mode.endswith('b'):
mode = mode[:-1]
-
+
if mode in ('r', 'r+'):
cr = ira_browse._cr # reuse the cursor of the browse object, just now
cr.execute('SELECT db_datas FROM ir_attachment WHERE id = %s',(ira_browse.id,))
filename = par.path
if isinstance(filename, (tuple, list)):
filename = '/'.join(filename)
-
+
try:
mime, icont = cntIndex.doIndex(data, filename=filename,
content_type=None, realfname=None)
class nodefd_db64(StringIO, nodes.node_descriptor):
""" A descriptor to db data, base64 (the old way)
-
+
It stores the data in base64 encoding at the db. Not optimal, but
the transparent compression of Postgres will save the day.
"""
self._size = 0L
if mode.endswith('b'):
mode = mode[:-1]
-
+
if mode in ('r', 'r+'):
data = base64.decodestring(ira_browse.db_datas)
if data:
filename = par.path
if isinstance(filename, (tuple, list)):
filename = '/'.join(filename)
-
+
try:
mime, icont = cntIndex.doIndex(data, filename=filename,
content_type=None, realfname=None)
media.
The referring document.directory-ies will control the placement of data
into the storage.
-
+
It is a bad idea to have multiple document.storage objects pointing to
the same tree of filesystem storage.
"""
def __prepare_realpath(self, cr, file_node, ira, store_path, do_create=True):
""" Cleanup path for realstore, create dirs if needed
-
+
@param file_node the node
@param ira ir.attachment browse of the file_node
@param store_path the path of the parent storage object, list
@param do_create create the directories, if needed
-
+
@return tuple(path "/var/filestore/real/dir/", npath ['dir','fname.ext'] )
"""
file_node.fix_ppath(cr, ira)
optionally, fil_obj could point to the browse object of the file
(ir.attachment)
"""
- if not context:
- context = {}
- boo = self.browse(cr, uid, id, context)
+ boo = self.browse(cr, uid, id, context=context)
if not boo.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if fil_obj:
ira = fil_obj
else:
"""
if context is None:
context = {}
- boo = self.browse(cr, uid, id, context)
+ boo = self.browse(cr, uid, id, context=context)
if not boo.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if boo.readonly and mode not in ('r', 'rb'):
raise IOError(errno.EPERM, "Readonly medium")
-
+
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
if boo.type == 'filestore':
if not ira.store_fname:
elif boo.type == 'virtual':
raise ValueError('Virtual storage does not support static files')
-
+
else:
raise TypeError("No %s storage" % boo.type)
This function MUST be used from an ir.attachment. It wouldn't make sense
to store things persistently for other types (dynamic).
"""
- if not context:
- context = {}
- boo = self.browse(cr, uid, id, context)
+ boo = self.browse(cr, uid, id, context=context)
if fil_obj:
ira = fil_obj
else:
if not boo.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if boo.readonly:
raise IOError(errno.EPERM, "Readonly medium")
fp = open(fname, 'wb')
try:
fp.write(data)
- finally:
+ finally:
fp.close()
self._doclog.debug( "Saved data to %s" % fname)
filesize = len(data) # os.stat(fname).st_size
-
+
# TODO Here, an old file would be left hanging.
except Exception, e:
fp = open(fname,'wb')
try:
fp.write(data)
- finally:
+ finally:
fp.close()
self._doclog.debug("Saved data to %s", fname)
filesize = len(data) # os.stat(fname).st_size
if not storage_bo.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if storage_bo.readonly:
raise IOError(errno.EPERM, "Readonly medium")
""" A preparation for a file rename.
It will not affect the database, but merely check and perhaps
rename the realstore file.
-
+
        @return the dict of values that can safely be stored in the db.
"""
sbro = self.browse(cr, uid, file_node.storage_id, context=context)
if not sbro.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if sbro.readonly:
raise IOError(errno.EPERM, "Readonly medium")
""" A preparation for a file move.
It will not affect the database, but merely check and perhaps
move the realstore file.
-
+
@param ndir_bro a browse object of document.directory, where this
file should move to.
        @return the dict of values that can safely be stored in the db.
if not sbro.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if sbro.readonly:
raise IOError(errno.EPERM, "Readonly medium")
self._doclog.warning("inconsistency in realstore: %s != %s" , fname, repr(opath))
oldpath = os.path.join(path, opath[-1])
-
+
npath = [sbro.path,] + (ndir_bro.get_full_path() or [])
npath = filter(lambda x: x is not None, npath)
newdir = os.path.join(*npath)
os.makedirs(newdir)
npath.append(opath[-1])
newpath = os.path.join(*npath)
-
+
self._doclog.debug("Going to move %s from %s to %s", opath[-1], oldpath, newpath)
shutil.move(oldpath, newpath)
-
+
store_path = npath[1:] + [opath[-1],]
store_fname = os.path.join(*store_path)
-
+
return { 'store_fname': store_fname }
else:
raise TypeError("No %s storage" % sbro.type)