import base64
import errno
import logging
+import shutil
from StringIO import StringIO
import psycopg2
We have to consider 3 cases of data /retrieval/:
Given (context,path) we need to access the file (aka. node).
Given (directory, context), we need one of its children (for listings, views)
- given (ir.attachment, context), we needs its data and metadata (node).
+ given (ir.attachment, context), we need its data and metadata (node).
For data /storage/ we have the cases:
Have (ir.attachment, context), we modify the file (save, update, rename etc).
Have (directory, context), we create a file.
Have (path, context), we create or modify a file.
-
+
Note that in all above cases, we don't explicitly choose the storage media,
but always require a context to be present.
media + directory.
The algorithm says that in any of the above cases, our first goal is to locate
-the node for any combination of search criteria. It would be wise NOT to
+the node for any combination of search criteria. It would be wise NOT to
represent each node in the path (like node[/] + node[/dir1] + node[/dir1/dir2])
but directly jump to the end node (like node[/dir1/dir2]) whenever possible.
if mode.endswith('b'):
mode = mode[:-1]
self.mode = mode
-
- for attr in ('closed', 'read', 'write', 'seek', 'tell'):
+ self._size = os.stat(path).st_size
+
+ for attr in ('closed', 'read', 'write', 'seek', 'tell', 'next'):
setattr(self,attr, getattr(self.__file, attr))
+ def size(self):
+ return self._size
+
+ def __iter__(self):
+ return self
+
def close(self):
# TODO: locking in init, close()
fname = self.__file.name
filename = par.path
if isinstance(filename, (tuple, list)):
filename = '/'.join(filename)
-
+
try:
mime, icont = cntIndex.doIndex(None, filename=filename,
content_type=None, realfname=fname)
"""
def __init__(self, parent, ira_browse, mode):
nodes.node_descriptor.__init__(self, parent)
+ self._size = 0L
if mode.endswith('b'):
mode = mode[:-1]
-
+
if mode in ('r', 'r+'):
cr = ira_browse._cr # reuse the cursor of the browse object, just now
cr.execute('SELECT db_datas FROM ir_attachment WHERE id = %s',(ira_browse.id,))
data = cr.fetchone()[0]
+ if data:
+ self._size = len(data)
StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
raise IOError(errno.EINVAL, "Invalid file mode")
self.mode = mode
+ def size(self):
+ return self._size
+
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
filename = par.path
if isinstance(filename, (tuple, list)):
filename = '/'.join(filename)
-
+
try:
mime, icont = cntIndex.doIndex(data, filename=filename,
content_type=None, realfname=None)
class nodefd_db64(StringIO, nodes.node_descriptor):
""" A descriptor to db data, base64 (the old way)
-
+
It stores the data in base64 encoding at the db. Not optimal, but
the transparent compression of Postgres will save the day.
"""
def __init__(self, parent, ira_browse, mode):
nodes.node_descriptor.__init__(self, parent)
+ self._size = 0L
if mode.endswith('b'):
mode = mode[:-1]
-
+
if mode in ('r', 'r+'):
- StringIO.__init__(self, base64.decodestring(ira_browse.db_datas))
+ data = base64.decodestring(ira_browse.db_datas)
+ if data:
+ self._size = len(data)
+ StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
# at write, we start at 0 (= overwrite), but have the original
raise IOError(errno.EINVAL, "Invalid file mode")
self.mode = mode
+ def size(self):
+ return self._size
+
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
filename = par.path
if isinstance(filename, (tuple, list)):
filename = '/'.join(filename)
-
+
try:
mime, icont = cntIndex.doIndex(data, filename=filename,
content_type=None, realfname=None)
media.
The referring document.directory-ies will control the placement of data
into the storage.
-
+
It is a bad idea to have multiple document.storage objects pointing to
the same tree of filesystem storage.
"""
def __prepare_realpath(self, cr, file_node, ira, store_path, do_create=True):
""" Cleanup path for realstore, create dirs if needed
-
+
@param file_node the node
@param ira ir.attachment browse of the file_node
@param store_path the path of the parent storage object, list
@param do_create create the directories, if needed
-
+
@return tuple(path "/var/filestore/real/dir/", npath ['dir','fname.ext'] )
"""
file_node.fix_ppath(cr, ira)
optionally, fil_obj could point to the browse object of the file
(ir.attachment)
"""
- if not context:
- context = {}
- boo = self.browse(cr, uid, id, context)
+ boo = self.browse(cr, uid, id, context=context)
if not boo.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if fil_obj:
ira = fil_obj
else:
"""
if context is None:
context = {}
- boo = self.browse(cr, uid, id, context)
+ boo = self.browse(cr, uid, id, context=context)
if not boo.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if boo.readonly and mode not in ('r', 'rb'):
raise IOError(errno.EPERM, "Readonly medium")
-
+
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
if boo.type == 'filestore':
if not ira.store_fname:
elif boo.type == 'realstore':
path, npath = self.__prepare_realpath(cr, file_node, ira, boo.path,
- do_create = (mode[1] in ('w','a')) )
+ do_create = (mode[0] in ('w','a')) )
fpath = os.path.join(path, npath[-1])
- if (not os.path.exists(fpath)) and mode[1] == 'r':
+ if (not os.path.exists(fpath)) and mode[0] == 'r':
raise IOError("File not found: %s" % fpath)
- elif mode[1] in ('w', 'a') and not ira.store_fname:
+ elif mode[0] in ('w', 'a') and not ira.store_fname:
store_fname = os.path.join(*npath)
cr.execute('UPDATE ir_attachment SET store_fname = %s WHERE id = %s',
(store_fname, ira.id))
elif boo.type == 'virtual':
raise ValueError('Virtual storage does not support static files')
-
+
else:
raise TypeError("No %s storage" % boo.type)
This function MUST be used from an ir.attachment. It wouldn't make sense
to store things persistently for other types (dynamic).
"""
- if not context:
- context = {}
- boo = self.browse(cr, uid, id, context)
+ boo = self.browse(cr, uid, id, context=context)
if fil_obj:
ira = fil_obj
else:
if not boo.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if boo.readonly:
raise IOError(errno.EPERM, "Readonly medium")
try:
store_fname = self.__get_random_fname(path)
fname = os.path.join(path, store_fname)
- fp = file(fname, 'wb')
- fp.write(data)
- fp.close()
+ fp = open(fname, 'wb')
+ try:
+ fp.write(data)
+ finally:
+ fp.close()
self._doclog.debug( "Saved data to %s" % fname)
filesize = len(data) # os.stat(fname).st_size
-
+
# TODO Here, an old file would be left hanging.
except Exception, e:
try:
path, npath = self.__prepare_realpath(cr, file_node, ira, boo.path, do_create=True)
fname = os.path.join(path, npath[-1])
- fp = file(fname,'wb')
- fp.write(data)
- fp.close()
+ fp = open(fname,'wb')
+ try:
+ fp.write(data)
+ finally:
+ fp.close()
self._doclog.debug("Saved data to %s", fname)
filesize = len(data) # os.stat(fname).st_size
store_fname = os.path.join(*npath)
if not storage_bo.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if storage_bo.readonly:
raise IOError(errno.EPERM, "Readonly medium")
if ktype == 'file':
try:
os.unlink(fname)
- except Exception, e:
+ except Exception:
self._doclog.warning("Could not remove file %s, please remove manually.", fname, exc_info=True)
else:
self._doclog.warning("Unknown unlink key %s" % ktype)
""" A preparation for a file rename.
It will not affect the database, but merely check and perhaps
rename the realstore file.
-
+
        @return the dict of values that can safely be stored in the db.
"""
sbro = self.browse(cr, uid, file_node.storage_id, context=context)
if not sbro.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if sbro.readonly:
raise IOError(errno.EPERM, "Readonly medium")
""" A preparation for a file move.
It will not affect the database, but merely check and perhaps
move the realstore file.
-
+
@param ndir_bro a browse object of document.directory, where this
file should move to.
        @return the dict of values that can safely be stored in the db.
if not sbro.online:
raise IOError(errno.EREMOTE, 'medium offline')
-
+
if sbro.readonly:
raise IOError(errno.EPERM, "Readonly medium")
# nothing to do for a rename, allow to change the db field
return { 'parent_id': ndir_bro.id }
elif sbro.type == 'realstore':
- raise NotImplementedError("Cannot move in realstore, yet") # TODO
- fname = fil_bo.store_fname
+ ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
+
+ path, opath = self.__prepare_realpath(cr, file_node, ira, sbro.path, do_create=False)
+ fname = ira.store_fname
+
if not fname:
- return ValueError("Tried to rename a non-stored file")
- path = sbro.path
- oldpath = os.path.join(path, fname)
-
- for ch in ('*', '|', "\\", '/', ':', '"', '<', '>', '?', '..'):
- if ch in new_name:
- raise ValueError("Invalid char %s in name %s" %(ch, new_name))
-
- file_node.fix_ppath(cr, ira)
- npath = file_node.full_path() or []
- dpath = [path,]
- dpath.extend(npath[:-1])
- dpath.append(new_name)
- newpath = os.path.join(*dpath)
- # print "old, new paths:", oldpath, newpath
- os.rename(oldpath, newpath)
- return { 'name': new_name, 'datas_fname': new_name, 'store_fname': new_name }
+ self._doclog.warning("Trying to rename a non-stored file")
+ if fname != os.path.join(*opath):
+ self._doclog.warning("inconsistency in realstore: %s != %s" , fname, repr(opath))
+
+ oldpath = os.path.join(path, opath[-1])
+
+ npath = [sbro.path,] + (ndir_bro.get_full_path() or [])
+ npath = filter(lambda x: x is not None, npath)
+ newdir = os.path.join(*npath)
+ if not os.path.isdir(newdir):
+ self._doclog.debug("Must create dir %s", newdir)
+ os.makedirs(newdir)
+ npath.append(opath[-1])
+ newpath = os.path.join(*npath)
+
+ self._doclog.debug("Going to move %s from %s to %s", opath[-1], oldpath, newpath)
+ shutil.move(oldpath, newpath)
+
+ store_path = npath[1:] + [opath[-1],]
+ store_fname = os.path.join(*store_path)
+
+ return { 'store_fname': store_fname }
else:
raise TypeError("No %s storage" % sbro.type)