import calendar
import copy
import datetime
+import itertools
import logging
import operator
import pickle
import re
import simplejson
import time
-import traceback
import types
import warnings
from lxml import etree
simplify_modifiers(modifiers)
node.set('modifiers', simplejson.dumps(modifiers))
+def setup_modifiers(node, field=None, context=None, in_tree_view=False):
+ """ Processes node attributes and field descriptors to generate
+ the ``modifiers`` node attribute and set it on the provided node.
+
+ Alters its first argument in-place.
+
+ :param node: ``field`` node from an OpenERP view
+ :type node: lxml.etree._Element
+ :param dict field: field descriptor corresponding to the provided node
+ :param dict context: execution context used to evaluate node attributes
+ :param bool in_tree_view: triggers the ``tree_invisible`` code
+ path (which is separate from ``invisible``):
+ tree views have two levels of invisibility,
+ the cell content (the column is present but
+ the cell itself is not displayed), driven by
+ ``invisible``, and the whole column (hidden
+ entirely), driven by ``tree_invisible``.
+ :returns: nothing
+ """
+ modifiers = {}
+ if field is not None:
+ transfer_field_to_modifiers(field, modifiers)
+ transfer_node_to_modifiers(
+ node, modifiers, context=context, in_tree_view=in_tree_view)
+ transfer_modifiers_to_node(modifiers, node)
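# Editorial sketch (not part of this change): assuming a ``field`` node parsed
# with lxml and a matching field descriptor dict (the descriptor values below
# are hypothetical), setup_modifiers() would be used roughly as follows:
#
#     node = etree.fromstring('<field name="partner_id" readonly="1"/>')
#     setup_modifiers(node, field={'readonly': False, 'required': True})
#     node.get('modifiers')  # JSON string such as '{"readonly": true, "required": true}'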
def test_modifiers(what, expected):
modifiers = {}
'SET DEFAULT': 'd',
}
-def last_day_of_current_month():
- today = datetime.date.today()
- last_day = str(calendar.monthrange(today.year, today.month)[1])
- return time.strftime('%Y-%m-' + last_day)
-
def intersect(la, lb):
return filter(lambda x: x in lb, la)
self._cr = cr
self._uid = uid
self._id = id
- self._table = table
+ self._table = table # deprecated, use _model!
+ self._model = table
self._table_name = self._table._name
self.__logger = logging.getLogger(
'osv.browse_record.' + self._table_name)
else:
return attr
else:
- self.logger.notifyChannel("browse_record", netsvc.LOG_WARNING,
- "Field '%s' does not exist in object '%s': \n%s" % (
- name, self, ''.join(traceback.format_exc())))
- raise KeyError("Field '%s' does not exist in object '%s'" % (
- name, self))
+ error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
+ self.logger.notifyChannel("browse_record", netsvc.LOG_WARNING, error_msg)
+ raise KeyError(error_msg)
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
if col._prefetch:
def __contains__(self, name):
return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)
+ def __iter__(self):
+ raise NotImplementedError("Iteration is not allowed on %s" % self)
+
def __hasattr__(self, name):
return name in self
self._cache[model].clear()
self._cache[model].update(cached_ids)
-def get_pg_type(f):
+def pg_varchar(size=0):
+ """ Returns the VARCHAR declaration for the provided size:
+
+ * If no size (or an empty or negative size) is provided, return an
+ 'infinite' VARCHAR
+ * Otherwise return a VARCHAR(n)
+
+ :param int size: varchar size, optional
+ :rtype: str
+ """
+ if size:
+ if not isinstance(size, int):
+ raise TypeError("VARCHAR parameter should be an int, got %s"
+ % type(size))
+ if size > 0:
+ return 'VARCHAR(%d)' % size
+ return 'VARCHAR'
+
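# Editorial note: per the definition above,
#
#     pg_varchar()      # -> 'VARCHAR'
#     pg_varchar(0)     # -> 'VARCHAR'
#     pg_varchar(16)    # -> 'VARCHAR(16)'
#     pg_varchar('16')  # raises TypeError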
+FIELDS_TO_PGTYPES = {
+ fields.boolean: 'bool',
+ fields.integer: 'int4',
+ fields.integer_big: 'int8',
+ fields.text: 'text',
+ fields.date: 'date',
+ fields.time: 'time',
+ fields.datetime: 'timestamp',
+ fields.binary: 'bytea',
+ fields.many2one: 'int4',
+}
+
+def get_pg_type(f, type_override=None):
"""
- returns a tuple
- (type returned by postgres when the column was created, type expression to create the column)
+ :param fields._column f: field to get a Postgres type for
+ :param type type_override: use the provided type for dispatching instead of the field's own type
+ :returns: (type returned by postgres when the column was created,
+ type expression used to create a column of that type)
+ :rtype: (str, str)
"""
+ field_type = type_override or type(f)
- type_dict = {
- fields.boolean: 'bool',
- fields.integer: 'int4',
- fields.integer_big: 'int8',
- fields.text: 'text',
- fields.date: 'date',
- fields.time: 'time',
- fields.datetime: 'timestamp',
- fields.binary: 'bytea',
- fields.many2one: 'int4',
- }
- if type(f) in type_dict:
- f_type = (type_dict[type(f)], type_dict[type(f)])
- elif isinstance(f, fields.float):
+ if field_type in FIELDS_TO_PGTYPES:
+ pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
+ elif issubclass(field_type, fields.float):
if f.digits:
- f_type = ('numeric', 'NUMERIC')
+ pg_type = ('numeric', 'NUMERIC')
else:
- f_type = ('float8', 'DOUBLE PRECISION')
- elif isinstance(f, (fields.char, fields.reference)):
- f_type = ('varchar', 'VARCHAR(%d)' % (f.size,))
- elif isinstance(f, fields.selection):
- if isinstance(f.selection, list) and isinstance(f.selection[0][0], (str, unicode)):
- f_size = reduce(lambda x, y: max(x, len(y[0])), f.selection, f.size or 16)
- elif isinstance(f.selection, list) and isinstance(f.selection[0][0], int):
- f_size = -1
+ pg_type = ('float8', 'DOUBLE PRECISION')
+ elif issubclass(field_type, (fields.char, fields.reference)):
+ pg_type = ('varchar', pg_varchar(f.size))
+ elif issubclass(field_type, fields.selection):
+ if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
+ or getattr(f, 'size', None) == -1:
+ pg_type = ('int4', 'INTEGER')
else:
- f_size = getattr(f, 'size', None) or 16
-
- if f_size == -1:
- f_type = ('int4', 'INTEGER')
+ pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
+ elif issubclass(field_type, fields.function):
+ if f._type == 'selection':
+ pg_type = ('varchar', pg_varchar())
else:
- f_type = ('varchar', 'VARCHAR(%d)' % f_size)
- elif isinstance(f, fields.function) and eval('fields.'+(f._type), globals()) in type_dict:
- t = eval('fields.'+(f._type), globals())
- f_type = (type_dict[t], type_dict[t])
- elif isinstance(f, fields.function) and f._type == 'float':
- if f.digits:
- f_type = ('numeric', 'NUMERIC')
- else:
- f_type = ('float8', 'DOUBLE PRECISION')
- elif isinstance(f, fields.function) and f._type == 'selection':
- f_type = ('text', 'text')
- elif isinstance(f, fields.function) and f._type == 'char':
- f_type = ('varchar', 'VARCHAR(%d)' % (f.size))
+ pg_type = get_pg_type(f, getattr(fields, f._type))
else:
- logger = netsvc.Logger()
- logger.notifyChannel("init", netsvc.LOG_WARNING, '%s type not supported!' % (type(f)))
- f_type = None
- return f_type
+ logging.getLogger('orm').warn('%s type not supported!', field_type)
+ pg_type = None
+
+ return pg_type
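# Editorial sketch (not part of the patch): simple column types are resolved
# through FIELDS_TO_PGTYPES, while function fields are dispatched a second
# time on their declared _type. ``_compute`` below is a hypothetical compute
# method used only for illustration.
#
#     get_pg_type(fields.integer('Count'))
#     # -> ('int4', 'int4')
#     get_pg_type(fields.char('Name', size=64))
#     # -> ('varchar', 'VARCHAR(64)')
#     get_pg_type(fields.function(_compute, type='integer'))
#     # -> ('int4', 'int4'), via the recursive dispatch on fields.integer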
class MetaModel(type):
_sequence = None
_description = None
+ # dict of {field:method}, with method returning the name_get of records
+ # to include in the _read_group, if grouped on this field
+ _group_by_full = {}
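# Editorial sketch (hypothetical model code, matching the call made in
# _read_group_fill_results below): a model wanting every stage column
# displayed in grouped views, even when empty, could declare something like:
#
#     def _read_group_stage_ids(self, cr, uid, ids, domain,
#                               read_group_order=None,
#                               access_rights_uid=None, context=None):
#         # return all (id, name) pairs to display as groups
#         stage_obj = self.pool.get('project.task.type')
#         stage_ids = stage_obj.search(cr, access_rights_uid or uid, [], context=context)
#         return stage_obj.name_get(cr, access_rights_uid or uid, stage_ids, context=context)
#
#     _group_by_full = {'stage_id': _read_group_stage_ids}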
+
# Transience
_transient = False # True in a TransientModel
_transient_max_count = None
raise TypeError('_name is mandatory in case of multiple inheritance')
for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
- parent_class = pool.get(parent_name).__class__
- if not pool.get(parent_name):
+ parent_model = pool.get(parent_name)
+ if not parent_model:
raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
+ if not getattr(cls, '_original_module', None) and name == parent_model._name:
+ cls._original_module = parent_model._original_module
+ parent_class = parent_model.__class__
nattr = {}
for s in attributes:
- new = copy.copy(getattr(pool.get(parent_name), s, {}))
+ new = copy.copy(getattr(parent_model, s, {}))
if s == '_columns':
# Don't _inherit custom fields.
for c in new.keys():
new.extend(cls.__dict__.get(s, []))
nattr[s] = new
cls = type(name, (cls, parent_class), dict(nattr, _register=False))
+ if not getattr(cls, '_original_module', None):
+ cls._original_module = cls._module
obj = object.__new__(cls)
obj.__init__(pool, cr)
return obj
f = self._columns[store_field]
if hasattr(f, 'digits_change'):
f.digits_change(cr)
+ def not_this_field(stored_func):
+ model, stored_field_name, _fnct, _fields, _order, _length = stored_func
+ return model != self._name or stored_field_name != store_field
+ self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
if not isinstance(f, fields.function):
continue
if not f.store:
continue
- if self._columns[store_field].store is True:
+ sm = f.store
+ if sm is True:
sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, 10, None)}
- else:
- sm = self._columns[store_field].store
for object, aa in sm.items():
if len(aa) == 4:
(fnct, fields2, order, length) = aa
raise except_orm('Error',
('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
self.pool._store_function.setdefault(object, [])
- ok = True
- for x, y, z, e, f, l in self.pool._store_function[object]:
- if (x==self._name) and (y==store_field) and (e==fields2):
- if f == order:
- ok = False
- if ok:
- self.pool._store_function[object].append( (self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
- self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
+ self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
+ self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
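# Editorial sketch (hypothetical field definition): the store dict registered
# above maps a triggering model to (function returning the ids to recompute,
# list of watched fields, priority, length), matching the error message above.
# ``_amount_all``, ``_get_order_ids`` and 'sale.order.line' are placeholders.
#
#     'amount_total': fields.function(
#         _amount_all, type='float', string='Total',
#         store={
#             'sale.order.line': (_get_order_ids,
#                                 ['price_unit', 'product_uom_qty'], 10, None),
#         }),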
for (key, _, msg) in self._sql_constraints:
self.pool._sql_error[self._table+'_'+key] = msg
else:
r = d['name']
else:
- break
+ postfix = 0
+ while True:
+ n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
+ if not model_data.search(cr, uid, [('name', '=', n)]):
+ break
+ postfix += 1
+ model_data.create(cr, uid, {
+ 'name': n,
+ 'model': self._name,
+ 'res_id': r['id'],
+ 'module': '__export__',
+ })
+ r = n
else:
r = r[f[i]]
# To display external name of selection field when its exported
return {'datas': datas}
def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
- """
- Import given data in given module
-
- :param cr: database cursor
- :param uid: current user id
- :param fields: list of fields
- :param data: data to import
- :param mode: 'init' or 'update' for record creation
- :param current_module: module name
- :param noupdate: flag for record creation
- :param context: context arguments, like lang, time zone,
- :param filename: optional file to store partial import state for recovery
- :rtype: tuple
+ """Import given data in given module
This method is used when importing data via client menu.
order_line/price_unit,
order_line/product_uom_qty,
order_line/product_uom/id (=xml_id)
+
+ This method returns a 4-tuple with the following structure::
+
+ (return_code, errored_resource, error_message, unused)
+
+ * The first item is a return code, it is ``-1`` in case of
+ import error, or the last imported row number in case of success
+ * The second item contains the record data dict that failed to import
+ in case of error, otherwise it's 0
+ * The third item contains an error message string in case of error,
+ otherwise it's 0
+ * The last item is currently unused, with no specific semantics
+
+ :param fields: list of fields to import
+ :param datas: data to import
+ :param mode: 'init' or 'update' for record creation
+ :param current_module: module name
+ :param noupdate: flag for record creation
+ :param context: context arguments, like lang, time zone
+ :param filename: optional file to store partial import state for recovery
+ :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
+ :rtype: (int, dict or 0, str or 0, str or 0)
"""
if not context:
context = {}
fields = map(fix_import_export_id_paths, fields)
- logger = netsvc.Logger()
ir_model_data_obj = self.pool.get('ir.model.data')
# mode: id (XML id) or .id (database id) or False for name_get
nbrmax = position+1
done = {}
- for i in range(len(fields)):
+ for i, field in enumerate(fields):
res = False
if i >= len(line):
raise Exception(_('Please check that all your lines have %d columns.'
if not line[i]:
continue
- field = fields[i]
if field[:len(prefix)] <> prefix:
if line[i] and skip:
return False
continue
+ field_name = field[len(prefix)]
#set the mode for m2o, o2m, m2m : xml_id/id/name
if len(field) == len(prefix)+1:
return [(6,0,res)]
# ID of the record using a XML ID
- if field[len(prefix)]=='id':
+ if field_name == 'id':
try:
- data_res_id = _get_id(model_name, line[i], current_module, 'id')
+ data_res_id = _get_id(model_name, line[i], current_module)
except ValueError:
pass
xml_id = line[i]
continue
# ID of the record using a database ID
- elif field[len(prefix)]=='.id':
+ elif field_name == '.id':
data_res_id = _get_id(model_name, line[i], current_module, '.id')
continue
+ field_type = fields_def[field_name]['type']
# recursive call for getting children and returning [(0,0,{})] or [(1,ID,{})]
- if fields_def[field[len(prefix)]]['type']=='one2many':
- if field[len(prefix)] in done:
+ if field_type == 'one2many':
+ if field_name in done:
continue
- done[field[len(prefix)]] = True
- relation = fields_def[field[len(prefix)]]['relation']
+ done[field_name] = True
+ relation = fields_def[field_name]['relation']
relation_obj = self.pool.get(relation)
newfd = relation_obj.fields_get( cr, uid, context=context )
pos = position
first = 0
while pos < len(datas):
- res2 = process_liness(self, datas, prefix + [field[len(prefix)]], current_module, relation_obj._name, newfd, pos, first)
+ res2 = process_liness(self, datas, prefix + [field_name], current_module, relation_obj._name, newfd, pos, first)
if not res2:
break
(newrow, pos, w2, data_res_id2, xml_id2) = res2
res.append( (data_res_id2 and 1 or 0, data_res_id2 or 0, newrow) )
-
- elif fields_def[field[len(prefix)]]['type']=='many2one':
- relation = fields_def[field[len(prefix)]]['relation']
+ elif field_type == 'many2one':
+ relation = fields_def[field_name]['relation']
res = _get_id(relation, line[i], current_module, mode)
- elif fields_def[field[len(prefix)]]['type']=='many2many':
- relation = fields_def[field[len(prefix)]]['relation']
+ elif field_type == 'many2many':
+ relation = fields_def[field_name]['relation']
res = many_ids(line[i], relation, current_module, mode)
- elif fields_def[field[len(prefix)]]['type'] == 'integer':
+ elif field_type == 'integer':
res = line[i] and int(line[i]) or 0
- elif fields_def[field[len(prefix)]]['type'] == 'boolean':
+ elif field_type == 'boolean':
res = line[i].lower() not in ('0', 'false', 'off')
- elif fields_def[field[len(prefix)]]['type'] == 'float':
+ elif field_type == 'float':
res = line[i] and float(line[i]) or 0.0
- elif fields_def[field[len(prefix)]]['type'] == 'selection':
- for key, val in fields_def[field[len(prefix)]]['selection']:
+ elif field_type == 'selection':
+ for key, val in fields_def[field_name]['selection']:
if tools.ustr(line[i]) in [tools.ustr(key), tools.ustr(val)]:
res = key
break
if line[i] and not res:
- logger.notifyChannel("import", netsvc.LOG_WARNING,
- _("key '%s' not found in selection field '%s'") % \
- (tools.ustr(line[i]), tools.ustr(field[len(prefix)])))
- warning += [_("Key/value '%s' not found in selection field '%s'") % (tools.ustr(line[i]), tools.ustr(field[len(prefix)]))]
+ logging.getLogger('orm.import').warn(
+ _("key '%s' not found in selection field '%s'"),
+ tools.ustr(line[i]), tools.ustr(field_name))
+ warning.append(_("Key/value '%s' not found in selection field '%s'") % (
+ tools.ustr(line[i]), tools.ustr(field_name)))
else:
res = line[i]
- row[field[len(prefix)]] = res or False
+ row[field_name] = res or False
- result = (row, nbrmax, warning, data_res_id, xml_id)
- return result
+ return row, nbrmax, warning, data_res_id, xml_id
fields_def = self.fields_get(cr, uid, context=context)
- if config.get('import_partial', False) and filename:
- data = pickle.load(file(config.get('import_partial')))
-
position = 0
- while position<len(datas):
- res = {}
+ if config.get('import_partial') and filename:
+ with open(config.get('import_partial'), 'rb') as partial_import_file:
+ data = pickle.load(partial_import_file)
+ position = data.get(filename, 0)
+ while position<len(datas):
(res, position, warning, res_id, xml_id) = \
process_liness(self, datas, [], current_module, self._name, fields_def, position=position)
if len(warning):
cr.rollback()
- return (-1, res, 'Line ' + str(position) +' : ' + '!\n'.join(warning), '')
+ return -1, res, 'Line ' + str(position) +' : ' + '!\n'.join(warning), ''
try:
ir_model_data_obj._update(cr, uid, self._name,
current_module, res, mode=mode, xml_id=xml_id,
noupdate=noupdate, res_id=res_id, context=context)
except Exception, e:
- return (-1, res, 'Line ' + str(position) + ' : ' + tools.ustr(e), '')
+ return -1, res, 'Line ' + str(position) + ' : ' + tools.ustr(e), ''
- if config.get('import_partial', False) and filename and (not (position%100)):
- data = pickle.load(file(config.get('import_partial')))
+ if config.get('import_partial') and filename and (not (position%100)):
+ with open(config.get('import_partial'), 'rb') as partial_import:
+ data = pickle.load(partial_import)
data[filename] = position
- pickle.dump(data, file(config.get('import_partial'), 'wb'))
+ with open(config.get('import_partial'), 'wb') as partial_import:
+ pickle.dump(data, partial_import)
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
cr.commit()
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
- return (position, 0, 0, 0)
+ return position, 0, 0, 0
def get_invalid_fields(self, cr, uid):
return list(self._invalids)
""" Set invisible to true if the user is not in the specified groups. """
if node.get('groups'):
groups = node.get('groups').split(',')
- access_pool = self.pool.get('ir.model.access')
- can_see = any(access_pool.check_groups(cr, user, group) for group in groups)
+ ir_model_access = self.pool.get('ir.model.access')
+ can_see = any(ir_model_access.check_groups(cr, user, group) for group in groups)
if not can_see:
node.set('invisible', '1')
modifiers['invisible'] = True
raise except_orm('View error', msg)
return arch, fields
- def __get_default_calendar_view(self):
- """Generate a default calendar view (For internal use only).
+ def _get_default_form_view(self, cr, user, context=None):
+ """ Generates a default single-line form view using all fields
+ of the current model except the m2m and o2m ones.
+
+ :param cr: database cursor
+ :param int user: user id
+ :param dict context: connection context
+ :returns: a form view as an lxml document
+ :rtype: etree._Element
"""
- # TODO could return an etree instead of a string
+ view = etree.Element('form', string=self._description)
+ # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
+ for field, descriptor in self.fields_get(cr, user, context=context).iteritems():
+ if descriptor['type'] in ('one2many', 'many2many'):
+ continue
+ etree.SubElement(view, 'field', name=field)
+ if descriptor['type'] == 'text':
+ etree.SubElement(view, 'newline')
+ return view
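# Editorial note: for a hypothetical model with a char field 'name' and a
# text field 'notes', the element built above would serialize to something
# like (field order depends on dict iteration order):
#
#     <form string="Model Description">
#         <field name="name"/>
#         <field name="notes"/>
#         <newline/>
#     </form>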
- arch = ('<?xml version="1.0" encoding="utf-8"?>\n'
- '<calendar string="%s"') % (self._description)
+ def _get_default_tree_view(self, cr, user, context=None):
+ """ Generates a single-field tree view, using _rec_name if
+ it is one of the columns, or the first column it finds otherwise
+
+ :param cr: database cursor
+ :param int user: user id
+ :param dict context: connection context
+ :returns: a tree view as an lxml document
+ :rtype: etree._Element
+ """
+ _rec_name = self._rec_name
+ if _rec_name not in self._columns:
+ _rec_name = self._columns.keys()[0] if self._columns else "id"
+
+ view = etree.Element('tree', string=self._description)
+ etree.SubElement(view, 'field', name=_rec_name)
+ return view
+
+ def _get_default_calendar_view(self, cr, user, context=None):
+ """ Generates a default calendar view by trying to infer
+ calendar fields from a number of pre-set attribute names
+
+ :param cr: database cursor
+ :param int user: user id
+ :param dict context: connection context
+ :returns: a calendar view
+ :rtype: etree._Element
+ """
+ def set_first_of(seq, in_, to):
+ """Sets the first value of ``seq`` also found in ``in_`` to
+ the ``to`` attribute of the view being closed over.
+
+ Returns whether it's found a suitable value (and set it on
+ the attribute) or not
+ """
+ for item in seq:
+ if item in in_:
+ view.set(to, item)
+ return True
+ return False
+
+ view = etree.Element('calendar', string=self._description)
+ etree.SubElement(view, 'field', name=self._rec_name)
if (self._date_name not in self._columns):
date_found = False
if not date_found:
raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
+ view.set('date_start', self._date_name)
- if self._date_name:
- arch += ' date_start="%s"' % (self._date_name)
-
- for color in ["user_id", "partner_id", "x_user_id", "x_partner_id"]:
- if color in self._columns:
- arch += ' color="' + color + '"'
- break
-
- dt_stop_flag = False
-
- for dt_stop in ["date_stop", "date_end", "x_date_stop", "x_date_end"]:
- if dt_stop in self._columns:
- arch += ' date_stop="' + dt_stop + '"'
- dt_stop_flag = True
- break
-
- if not dt_stop_flag:
- for dt_delay in ["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"]:
- if dt_delay in self._columns:
- arch += ' date_delay="' + dt_delay + '"'
- break
+ set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
+ self._columns, 'color')
- arch += ('>\n'
- ' <field name="%s"/>\n'
- '</calendar>') % (self._rec_name)
+ if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
+ self._columns, 'date_stop'):
+ if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
+ self._columns, 'date_delay'):
+ raise except_orm(
+ _('Invalid Object Architecture!'),
+ _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay") % self._name)
- return arch
+ return view
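# Editorial note: for a hypothetical model with _rec_name 'name', a 'date'
# column as _date_name, plus 'user_id' and 'date_end' columns, the element
# built above would serialize roughly to:
#
#     <calendar string="Model Description" date_start="date"
#               color="user_id" date_stop="date_end">
#         <field name="name"/>
#     </calendar>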
- def __get_default_search_view(self, cr, uid, context=None):
+ def _get_default_search_view(self, cr, uid, context=None):
+ """ Generates a default search view from the fields flagged as
+ searchable (``select``) on the model or in its form and tree views
+
+ :param cr: database cursor
+ :param int uid: user id
+ :param dict context: connection context
+ :returns: an lxml document of the view
+ :rtype: etree._Element
+ """
form_view = self.fields_view_get(cr, uid, False, 'form', context=context)
tree_view = self.fields_view_get(cr, uid, False, 'tree', context=context)
- fields_to_search = set()
# TODO it seems _all_columns could be used instead of fields_get (no need for translated fields info)
fields = self.fields_get(cr, uid, context=context)
- for field in fields:
- if fields[field].get('select'):
- fields_to_search.add(field)
+ fields_to_search = set(
+ field for field, descriptor in fields.iteritems()
+ if descriptor.get('select'))
+
for view in (form_view, tree_view):
view_root = etree.fromstring(view['arch'])
# Only care about select=1 in xpath below, because select=2 is covered
# by the custom advanced search in clients
- fields_to_search = fields_to_search.union(view_root.xpath("//field[@select=1]/@name"))
+ fields_to_search.update(view_root.xpath("//field[@select=1]/@name"))
tree_view_root = view_root # as provided by loop above
- search_view = etree.Element("search", attrib={'string': tree_view_root.get("string", "")})
- field_group = etree.Element("group")
- search_view.append(field_group)
+ search_view = etree.Element("search", string=tree_view_root.get("string", ""))
+ field_group = etree.SubElement(search_view, "group")
for field_name in fields_to_search:
- field_group.append(etree.Element("field", attrib={'name': field_name}))
+ etree.SubElement(field_group, "field", name=field_name)
- #TODO tostring can be removed as fromstring is call directly after...
- return etree.tostring(search_view, encoding="utf-8").replace('\t', '')
+ return search_view
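# Editorial note: with hypothetical searchable fields 'name' and 'partner_id'
# (declared with a ``select`` attribute or flagged select=1 in the form/tree
# views), the element built above would serialize roughly to:
#
#     <search string="Model Description">
#         <group>
#             <field name="name"/>
#             <field name="partner_id"/>
#         </group>
#     </search>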
#
# if view_id, view_type is not required
# if a view was found
if sql_res:
- result['type'] = sql_res['type']
- result['view_id'] = sql_res['id']
-
source = etree.fromstring(encode(sql_res['arch']))
- result['arch'] = apply_view_inheritance(cr, user, source, result['view_id'])
-
- result['name'] = sql_res['name']
- result['field_parent'] = sql_res['field_parent'] or False
+ result.update(
+ arch=apply_view_inheritance(cr, user, source, sql_res['id']),
+ type=sql_res['type'],
+ view_id=sql_res['id'],
+ name=sql_res['name'],
+ field_parent=sql_res['field_parent'] or False)
else:
-
# otherwise, build some kind of default view
- if view_type == 'form':
- # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
- res = self.fields_get(cr, user, context=context)
- xml = '<?xml version="1.0" encoding="utf-8"?> ' \
- '<form string="%s">' % (self._description,)
- for x in res:
- if res[x]['type'] not in ('one2many', 'many2many'):
- xml += '<field name="%s"/>' % (x,)
- if res[x]['type'] == 'text':
- xml += "<newline/>"
- xml += "</form>"
-
- elif view_type == 'tree':
- _rec_name = self._rec_name
- if _rec_name not in self._columns:
- _rec_name = self._columns.keys()[0]
- xml = '<?xml version="1.0" encoding="utf-8"?>' \
- '<tree string="%s"><field name="%s"/></tree>' \
- % (self._description, _rec_name)
-
- elif view_type == 'calendar':
- xml = self.__get_default_calendar_view()
-
- elif view_type == 'search':
- xml = self.__get_default_search_view(cr, user, context)
-
- else:
+ try:
+ view = getattr(self, '_get_default_%s_view' % view_type)(
+ cr, user, context)
+ except AttributeError:
# what happens here, graph case?
raise except_orm(_('Invalid Architecture!'), _("There is no view of type '%s' defined for the structure!") % view_type)
- result['arch'] = etree.fromstring(encode(xml))
- result['name'] = 'default'
- result['field_parent'] = False
- result['view_id'] = 0
+
+ result.update(
+ arch=view,
+ name='default',
+ field_parent=False,
+ view_id=0)
if parent_view_model != self._name:
ctx = context.copy()
resrelate = ir_values_obj.get(cr, user, 'action',
'client_action_relate', [(self._name, False)], False,
context)
- resprint = map(clean, resprint)
- resaction = map(clean, resaction)
- resaction = filter(lambda x: not x.get('multi', False), resaction)
- resprint = filter(lambda x: not x.get('multi', False), resprint)
+ resaction = [clean(action) for action in resaction
+ if view_type == 'tree' or not action[2].get('multi')]
+ resprint = [clean(print_) for print_ in resprint
+ if view_type == 'tree' or not print_[2].get('multi')]
resrelate = map(lambda x: x[2], resrelate)
- for x in resprint + resaction + resrelate:
+ for x in itertools.chain(resprint, resaction, resrelate):
x['string'] = x['name']
result['toolbar'] = {
if context is None:
context = {}
args = args[:]
- if name:
+ # optimize out the default criterion of ``ilike ''`` that matches everything
+ if not (name == '' and operator == 'ilike'):
args += [(self._rec_name, operator, name)]
access_rights_uid = name_get_uid or user
ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
def read_string(self, cr, uid, id, langs, fields=None, context=None):
res = {}
res2 = {}
- self.pool.get('ir.model.access').check(cr, uid, 'ir.translation', 'read')
+ self.pool.get('ir.translation').check_read(cr, uid)
if not fields:
fields = self._columns.keys() + self._inherit_fields.keys()
#FIXME: collect all calls to _get_source into one SQL call.
return res
def write_string(self, cr, uid, id, langs, vals, context=None):
- self.pool.get('ir.model.access').check(cr, uid, 'ir.translation', 'write')
+ self.pool.get('ir.translation').check_write(cr, uid)
#FIXME: try to only call the translation in one SQL
for lang in langs:
for field in vals:
except AttributeError:
pass
+
+ def _read_group_fill_results(self, cr, uid, domain, groupby, groupby_list, aggregated_fields,
+ read_group_result, read_group_order=None, context=None):
+ """Helper method for filling in empty groups for all possible values of
+ the field being grouped by"""
+
+ # self._group_by_full should map groupable fields to a method that returns
+ # a list of all aggregated values that we want to display for this field,
+ # in the form of a m2o-like pair (key,label).
+ # This is useful to implement kanban views for instance, where all columns
+ # should be displayed even if they don't contain any record.
+
+ # Grab the list of all groups that should be displayed, including all present groups
+ present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
+ all_groups = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
+ read_group_order=read_group_order,
+ access_rights_uid=openerp.SUPERUSER_ID,
+ context=context)
+
+ result_template = dict.fromkeys(aggregated_fields, False)
+ result_template.update({groupby + '_count':0})
+ if groupby_list and len(groupby_list) > 1:
+ result_template.update(__context={'group_by': groupby_list[1:]})
+
+ # Merge the left_side (current results as dicts) with the right_side (all
+ # possible values as m2o pairs). Both lists are supposed to be using the
+ # same ordering, and can be merged in one pass.
+ result = []
+ known_values = {}
+ def append_left(left_side):
+ grouped_value = left_side[groupby] and left_side[groupby][0]
+ if not grouped_value in known_values:
+ result.append(left_side)
+ known_values[grouped_value] = left_side
+ else:
+ count_attr = groupby + '_count'
+ known_values[grouped_value].update({count_attr: left_side[count_attr]})
+ def append_right(right_side):
+ grouped_value = right_side[0]
+ if not grouped_value in known_values:
+ line = dict(result_template)
+ line.update({
+ groupby: right_side,
+ '__domain': [(groupby,'=',grouped_value)] + domain,
+ })
+ result.append(line)
+ known_values[grouped_value] = line
+ while read_group_result or all_groups:
+ left_side = read_group_result[0] if read_group_result else None
+ right_side = all_groups[0] if all_groups else None
+ assert left_side is None or left_side[groupby] is False \
+ or isinstance(left_side[groupby], (tuple,list)), \
+ 'M2O-like pair expected, got %r' % left_side[groupby]
+ assert right_side is None or isinstance(right_side, (tuple,list)), \
+ 'M2O-like pair expected, got %r' % right_side
+ if left_side is None:
+ append_right(all_groups.pop(0))
+ elif right_side is None:
+ append_left(read_group_result.pop(0))
+ elif left_side[groupby] == right_side:
+ append_left(read_group_result.pop(0))
+ all_groups.pop(0) # discard right_side
+ elif not left_side[groupby] or not left_side[groupby][0]:
+ # left side == "Undefined" entry, not present on right_side
+ append_left(read_group_result.pop(0))
+ else:
+ append_right(all_groups.pop(0))
+ return result
+
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
"""
Get the list of records in list view grouped by the given ``groupby`` fields
"""
context = context or {}
- self.pool.get('ir.model.access').check(cr, uid, self._name, 'read')
+ self.check_read(cr, uid)
if not fields:
fields = self._columns.keys()
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
fget = self.fields_get(cr, uid, fields)
- float_int_fields = filter(lambda x: fget[x]['type'] in ('float', 'integer'), fields)
flist = ''
group_count = group_by = groupby
if groupby:
raise except_orm(_('Invalid group_by'),
_('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(groupby,))
-
- fields_pre = [f for f in float_int_fields if
- f == self.CONCURRENCY_CHECK_FIELD
- or (f in self._columns and getattr(self._columns[f], '_classic_write'))]
- for f in fields_pre:
- if f not in ['id', 'sequence']:
- group_operator = fget[f].get('group_operator', 'sum')
- if flist:
- flist += ', '
- qualified_field = '"%s"."%s"' % (self._table, f)
- flist += "%s(%s) AS %s" % (group_operator, qualified_field, f)
+ aggregated_fields = [
+ f for f in fields
+ if f not in ('id', 'sequence')
+ if fget[f]['type'] in ('integer', 'float')
+ if (f in self._columns and getattr(self._columns[f], '_classic_write'))]
+ for f in aggregated_fields:
+ group_operator = fget[f].get('group_operator', 'sum')
+ if flist:
+ flist += ', '
+ qualified_field = '"%s"."%s"' % (self._table, f)
+ flist += "%s(%s) AS %s" % (group_operator, qualified_field, f)
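# Editorial note: for a hypothetical table "sale_order" with an aggregated
# float column 'amount_total', the fragment appended above would look like:
#
#     sum("sale_order"."amount_total") AS amount_total
#
# with 'sum' replaced by the column's group_operator when one is declared.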
gb = groupby and (' GROUP BY ' + qualified_groupby_field) or ''
alldata[r['id']] = r
del r['id']
- data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=orderby or groupby, context=context)
+ order = orderby or groupby
+ data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=order, context=context)
# the IDS of records that have groupby field value = False or '' should be sorted too
data_ids += filter(lambda x:x not in data_ids, alldata.keys())
data = self.read(cr, uid, data_ids, groupby and [groupby] or ['id'], context=context)
del alldata[d['id']][groupby]
d.update(alldata[d['id']])
del d['id']
+
+ if groupby and groupby in self._group_by_full:
+ data = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
+ aggregated_fields, data, read_group_order=order,
+ context=context)
+
return data
def _inherits_join_add(self, current_table, parent_model_name, query):
if f_obj_type:
ok = False
casts = [
- ('text', 'char', 'VARCHAR(%d)' % (f.size or 0,), '::VARCHAR(%d)'%(f.size or 0,)),
+ ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
('varchar', 'text', 'TEXT', ''),
('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
]
if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size < f.size:
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
- cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" VARCHAR(%d)' % (self._table, k, f.size))
- cr.execute('UPDATE "%s" SET "%s"=temp_change_size::VARCHAR(%d)' % (self._table, k, f.size))
+ cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
+ cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
cr.commit()
self.__schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
def _select_column_data(self, cr):
+ # attlen is the number of bytes necessary to represent the type when
+ # the type has a fixed size. If the type has a varying size, attlen is
+ # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
+ # Thus the query can return a negative size for an unlimited varchar.
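# For example (editorial note): a VARCHAR(16) column is reported with
# atttypmod = 20, hence size = 20 - 4 = 16, while an unlimited VARCHAR has
# atttypmod = -1 and is therefore reported with size = -1 - 4 = -5.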
cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size " \
"FROM pg_class c,pg_attribute a,pg_type t " \
"WHERE c.relname=%s " \
if context is None:
context = {}
- ira = self.pool.get('ir.model.access')
- write_access = ira.check(cr, user, self._name, 'write', False) or \
- ira.check(cr, user, self._name, 'create', False)
+ write_access = self.check_write(cr, user, False) or \
+ self.check_create(cr, user, False)
res = {}
if allfields and f not in allfields:
continue
- res[f] = fields.field_to_dict(self, cr, user, context, field)
+ res[f] = fields.field_to_dict(self, cr, user, field, context=context)
if not write_access:
res[f]['readonly'] = True
if not context:
context = {}
- self.pool.get('ir.model.access').check(cr, user, self._name, 'read')
+ self.check_read(cr, user)
if not fields:
fields = list(set(self._columns.keys() + self._inherit_fields.keys()))
if isinstance(ids, (int, long)):
# mention the first one only to keep the error message readable
raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
+ def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
+ """Verifies that the operation given by ``operation`` is allowed for the user
+ according to the access rights."""
+ return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
+
+ def check_create(self, cr, uid, raise_exception=True):
+ return self.check_access_rights(cr, uid, 'create', raise_exception)
+
+ def check_read(self, cr, uid, raise_exception=True):
+ return self.check_access_rights(cr, uid, 'read', raise_exception)
+
+ def check_unlink(self, cr, uid, raise_exception=True):
+ return self.check_access_rights(cr, uid, 'unlink', raise_exception)
+
+ def check_write(self, cr, uid, raise_exception=True):
+ return self.check_access_rights(cr, uid, 'write', raise_exception)
+
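# Editorial note: these helpers are thin wrappers, so for instance
#
#     self.check_write(cr, uid)
#
# is equivalent to
#
#     self.pool.get('ir.model.access').check(cr, uid, self._name, 'write', True)
#
# and, when called with raise_exception=False, returns False instead of raising.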
def check_access_rule(self, cr, uid, ids, operation, context=None):
"""Verifies that the operation given by ``operation`` is allowed for the user
according to ir.rules.
if uid == SUPERUSER_ID:
return
- if self.is_transient:
+ if self.is_transient():
# Only one single implicit access rule for transient models: owner only!
# This is ok to hardcode because we assert that TransientModels always
# have log_access enabled and this the create_uid column is always there.
WHERE id IN %%s""" % self._table, (tuple(ids),))
uids = [x[0] for x in cr.fetchall()]
if len(uids) != 1 or uids[0] != uid:
- raise orm.except_orm(_('AccessError'), '%s access is '
+ raise except_orm(_('AccessError'), '%s access is '
'restricted to your own records for transient models '
'(except for the super-user).' % operation.capitalize())
else:
self._check_concurrency(cr, ids, context)
- self.pool.get('ir.model.access').check(cr, uid, self._name, 'unlink')
+ self.check_unlink(cr, uid)
properties = self.pool.get('ir.property')
domain = [('res_id', '=', False),
if readonly[0][0] >= 1:
edit = True
break
- elif readonly[0][0] == 0:
- edit = False
- else:
- edit = False
if not edit:
vals.pop(field)
ids = [ids]
self._check_concurrency(cr, ids, context)
- self.pool.get('ir.model.access').check(cr, user, self._name, 'write')
+ self.check_write(cr, user)
result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
for id in ids:
result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
+ unknown_fields = updend[:]
for table in self._inherits:
col = self._inherits[table]
nids = []
for val in updend:
if self._inherit_fields[val][0] == table:
v[val] = vals[val]
+ unknown_fields.remove(val)
if v:
self.pool.get(table).write(cr, user, nids, v, context)
+ if unknown_fields:
+ self.__logger.warn(
+ 'No such field(s) in model %s: %s.',
+ self._name, ', '.join(unknown_fields))
self._validate(cr, user, ids, context)
# TODO: use _order to set dest at the right position and not first node of parent
if self.is_transient():
self._transient_vacuum(cr, user)
- self.pool.get('ir.model.access').check(cr, user, self._name, 'create')
+ self.check_create(cr, user)
vals = self._add_missing_default_values(cr, user, vals, context)
tocreate[v] = {'id': vals[self._inherits[v]]}
(upd0, upd1, upd2) = ('', '', [])
upd_todo = []
+ unknown_fields = []
for v in vals.keys():
- if v in self._inherit_fields:
+ if v in self._inherit_fields and v not in self._columns:
(table, col, col_detail, original_parent) = self._inherit_fields[v]
tocreate[table][v] = vals[v]
del vals[v]
else:
if (v not in self._inherit_fields) and (v not in self._columns):
del vals[v]
+ unknown_fields.append(v)
+ if unknown_fields:
+ self.__logger.warn(
+ 'No such field(s) in model %s: %s.',
+ self._name, ', '.join(unknown_fields))
# Try-except added to filter the creation of those records whose fields are readonly.
# Example: any dashboard in which all the fields are readonly (due to database views).
"""
if context is None:
context = {}
- self.pool.get('ir.model.access').check(cr, access_rights_uid or user, self._name, 'read')
+ self.check_read(cr, access_rights_uid or user)
# For transient models, restrict acces to the current user, except for the super-user
if self.is_transient() and self._log_access and user != SUPERUSER_ID:
:type default: dictionary
:param context: context arguments, like lang, time zone
:type context: dictionary
- :return: True
+ :return: id of the newly created record
"""
if context is None:
return False
return True
- def _get_xml_ids(self, cr, uid, ids, *args, **kwargs):
- """Find out the XML ID(s) of any database record.
+ def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
+ """Retrieve the External ID(s) of any database record.
-        **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
+        **Synopsis**: ``_get_external_ids(cr, uid, ids) -> { 'id': ['module.external_id'] }``
- :return: map of ids to the list of their fully qualified XML IDs
- (empty list when there's none).
+ :return: map of ids to the list of their fully qualified External IDs
+ in the form ``module.key``, or an empty list when there's no External
+ ID for a record, e.g.::
+
+ { 'id': ['module.ext_id', 'module.ext_id_bis'],
+ 'id2': [] }
"""
- model_data_obj = self.pool.get('ir.model.data')
- data_ids = model_data_obj.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
- data_results = model_data_obj.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
+ ir_model_data = self.pool.get('ir.model.data')
+ data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
+ data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
result = {}
for id in ids:
# can't use dict.fromkeys() as the list would be shared!
result[record['res_id']].append('%(module)s.%(name)s' % record)
return result
- def get_xml_id(self, cr, uid, ids, *args, **kwargs):
- """Find out the XML ID of any database record, if there
+ def get_external_id(self, cr, uid, ids, *args, **kwargs):
+ """Retrieve the External ID of any database record, if there
is one. This method works as a possible implementation
for a function field, to be able to add it to any
- model object easily, referencing it as ``osv.osv.get_xml_id``.
+ model object easily, referencing it as ``Model.get_external_id``.
- When multiple XML IDs exist for a record, only one
+ When multiple External IDs exist for a record, only one
of them is returned (randomly).
- **Synopsis**: ``get_xml_id(cr, uid, ids) -> { 'id': 'module.xml_id' }``
-
:return: map of ids to their fully qualified XML ID,
defaulting to an empty string when there's none
- (to be usable as a function field).
+ (to be usable as a function field),
+ e.g.::
+
+ { 'id': 'module.ext_id',
+ 'id2': '' }
"""
results = self._get_xml_ids(cr, uid, ids)
- for k, v in results.items():
+ for k, v in results.iteritems():
if results[k]:
results[k] = v[0]
else:
results[k] = ''
return results
+ # backwards compatibility
+ get_xml_id = get_external_id
+ _get_xml_ids = _get_external_ids
+
# Transience
def is_transient(self):
""" Return whether the model is transient.
return True
+ def resolve_o2m_commands_to_record_dicts(self, cr, uid, field_name, o2m_commands, fields=None, context=None):
+ """ Serializes o2m commands into record dictionaries (as if
+ all the o2m records came from the database via a read()), and
+ returns an iterable over these dictionaries.
+
+ Because o2m commands might be creation commands, not all
+ record ids will contain an ``id`` field. Commands matching an
+ existing record (``UPDATE`` and ``LINK_TO``) will have an id.
+
+ .. note:: ``CREATE``, ``UPDATE`` and ``LINK_TO`` stand for the
+ o2m command codes ``0``, ``1`` and ``4``
+ respectively
+
+ :param field_name: name of the o2m field matching the commands
+ :type field_name: str
+ :param o2m_commands: one2many commands to execute on ``field_name``
+ :type o2m_commands: list((int|False, int|False, dict|False))
+ :param fields: list of fields to read from the database, when applicable
+ :type fields: list(str)
+ :raises AssertionError: if a command is not ``CREATE``, ``UPDATE`` or ``LINK_TO``
+ :returns: o2m records in a shape similar to that returned by
+ ``read()`` (except records may be missing the ``id``
+ field if they don't exist in db)
+ :rtype: ``list(dict)``
+ """
+ o2m_model = self._all_columns[field_name].column._obj
+
+ # convert single ids and pairs to tripled commands
+ commands = []
+ for o2m_command in o2m_commands:
+ if not isinstance(o2m_command, (list, tuple)):
+ command = 4
+ commands.append((command, o2m_command, False))
+ elif len(o2m_command) == 1:
+ (command,) = o2m_command
+ commands.append((command, False, False))
+ elif len(o2m_command) == 2:
+ command, id = o2m_command
+ commands.append((command, id, False))
+ else:
+ command = o2m_command[0]
+ commands.append(o2m_command)
+ assert command in (0, 1, 4), \
+ "Only CREATE, UPDATE and LINK_TO commands are supported in resolver"
+
+ # extract records to read, by id, in a mapping dict
+ ids_to_read = [id for (command, id, _) in commands if command in (1, 4)]
+ records_by_id = dict(
+ (record['id'], record)
+ for record in self.pool.get(o2m_model).read(
+ cr, uid, ids_to_read, fields=fields, context=context))
+
+ record_dicts = []
+ # merge record from db with record provided by command
+ for command, id, record in commands:
+ item = {}
+ if command in (1, 4): item.update(records_by_id[id])
+ if command in (0, 1): item.update(record)
+ record_dicts.append(item)
+ return record_dicts
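# Editorial sketch (hypothetical values): mixing the three supported command
# types on an o2m field 'order_line' could look like the following, where the
# field and record ids are placeholders:
#
#     commands = [
#         (0, 0, {'product_id': 42, 'price_unit': 10.0}),  # CREATE
#         (1, 7, {'price_unit': 12.0}),                     # UPDATE record 7
#         (4, 8, False),                                    # LINK_TO record 8
#     ]
#     records = self.resolve_o2m_commands_to_record_dicts(
#         cr, uid, 'order_line', commands,
#         fields=['product_id', 'price_unit'], context=context)
#     # -> the first dict has no 'id'; the last two carry the database values
#     #    of records 7 and 8, with the UPDATE values overlaid on record 7.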
+
# keep this import here, at top it will cause dependency cycle errors
import expression
class TransientModel(BaseModel):
"""Model super-class for transient records, meant to be temporarily
persisted, and regularly vacuum-cleaned.
-
+
A TransientModel has a simplified access rights management,
all users can create new records, and may only access the
records they created. The super-user has unrestricted access
_register = False # not visible in ORM registry, meant to be python-inherited only
-# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
\ No newline at end of file
+# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: