[MERGE] sync with trunk
author Olivier Dony <odo@openerp.com>
Thu, 22 Dec 2011 10:25:55 +0000 (11:25 +0100)
committer Olivier Dony <odo@openerp.com>
Thu, 22 Dec 2011 10:25:55 +0000 (11:25 +0100)
bzr revid: odo@openerp.com-20111222102555-bydsozbdu6urkj31

openerp/addons/base/base.sql
openerp/addons/base/ir/ir.xml
openerp/addons/base/ir/ir_model.py
openerp/osv/fields.py
openerp/osv/orm.py
setup.py

@@@ -49,8 -49,6 +49,8 @@@ CREATE TABLE ir_model_fields 
    primary key(id)
  );
  
 +ALTER TABLE ir_model_fields ADD column serialization_field_id int references ir_model_fields on delete cascade;
 +
  
  -------------------------------------------------------------------------
  -- Actions
@@@ -289,6 -287,7 +289,7 @@@ CREATE TABLE ir_module_module 
      name character varying(128) NOT NULL,
      author character varying(128),
      url character varying(128),
+     icon character varying(64),
      state character varying(16),
      latest_version character varying(64),
      shortdesc character varying(256),
      category_id integer REFERENCES ir_module_category ON DELETE SET NULL,
      certificate character varying(64),
      description text,
+     application boolean default False,
      demo boolean default False,
      web boolean DEFAULT FALSE,
      license character varying(32),
          <menuitem id="menu_res_company_global"
              parent="base.menu_administration"
              name="Companies"
-             sequence="5"/>
+             sequence="2"/>
  
          <record id="action_res_company_tree" model="ir.actions.act_window">
              <field name="name">Company's Structure</field>
                                          <field name="selection" attrs="{'required': [('ttype','in',['selection','reference'])], 'readonly': [('ttype','not in',['selection','reference'])]}"/>
                                          <field name="size" attrs="{'required': [('ttype','in',['char','reference'])], 'readonly': [('ttype','not in',['char','reference'])]}"/>
                                          <field name="domain" attrs="{'readonly': [('relation','=','')]}"/>
 +                                        <field name="model_id" invisible="1"/>
 +                                        <field name="serialization_field_id" attrs="{'readonly': [('state','=','base')]}" domain="[('ttype','=','serialized'), ('model_id', '=', model_id)]"/>
                                      </group>
                                      <group colspan="2" col="2">
                                          <field name="required"/>
                                  <field name="selection" attrs="{'required': [('ttype','in',['selection','reference'])], 'readonly': [('ttype','not in',['selection','reference'])]}"/>
                                  <field name="size" attrs="{'required': [('ttype','in',['char','reference'])], 'readonly': [('ttype','not in',['char','reference'])]}"/>
                                  <field name="domain" attrs="{'readonly': [('relation','=','')]}"/>
 +                                <field name="model_id" invisible="1"/>
 +                                <field name="serialization_field_id" attrs="{'readonly': [('state','=','base')]}" domain="[('ttype','=','serialized'), ('model_id', '=', model_id)]"/>
                              </group>
  
                              <group colspan="2" col="2">
              <field name="view_id" ref="ir_cron_view_tree"/>
          </record>
  
-         <menuitem id="next_id_10" name="Scheduler" parent="base.menu_config"  groups="base.group_extended"/>
+         <menuitem id="next_id_10" name="Scheduler" parent="base.menu_config"  groups="base.group_extended" sequence="11"/>
          <menuitem action="ir_cron_act" id="menu_ir_cron_act" parent="next_id_10"/>
  
  
              <field name="view_type">form</field>
              <field name="help">The configuration wizards are used to help you configure a new instance of OpenERP. They are launched during the installation of new modules, but you can choose to restart some wizards manually from this menu.</field>
          </record>
-         <menuitem id="next_id_11" name="Configuration Wizards" parent="base.menu_config" sequence="1"/>
+         <menuitem id="next_id_11" name="Configuration Wizards" parent="base.menu_config" sequence="2"
+             groups="base.group_extended"/>
  
          <menuitem action="act_ir_actions_todo_form" id="menu_ir_actions_todo_form"
                  parent="next_id_11" sequence="20"/>
@@@ -161,7 -161,7 +161,7 @@@ class ir_model(osv.osv)
              #pooler.restart_pool(cr.dbname)
          return res
  
-     def instanciate(self, cr, user, model, context={}):
+     def instanciate(self, cr, user, model, context=None):
          class x_custom_model(osv.osv):
              pass
          x_custom_model._name = model
@@@ -207,7 -207,6 +207,7 @@@ class ir_model_fields(osv.osv)
          'view_load': fields.boolean('View Auto-Load'),
          'selectable': fields.boolean('Selectable'),
          'modules': fields.function(_in_modules, method=True, type='char', size=128, string='In modules', help='List of modules in which the field is defined'),
 +        'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain="[('ttype','=','serialized')]", ondelete='cascade'),
      }
      _rec_name='field_description'
      _defaults = {
          if context and context.get('manual',False):
              vals['state'] = 'manual'
  
 +        # For now, renaming a sparse field or changing its storing system is not allowed;
 +        # this may be supported later.
 +        if 'serialization_field_id' in vals or 'name' in vals:
 +            for field in self.browse(cr, user, ids, context=context):
 +                if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
 +                    raise except_orm(_('Error!'), _('Changing the storing system for the field "%s" is not allowed.') % field.name)
 +                if field.serialization_field_id and 'name' in vals and field.name != vals['name']:
 +                    raise except_orm(_('Error!'), _('Renaming the sparse field "%s" is not allowed.') % field.name)
 +
          column_rename = None # if set, *one* column can be renamed here
          obj = None
          models_patch = {}    # structs of (obj, [(field, prop, change_to),..])
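
With serialization_field_id in place, a custom sparse field can be attached to its serialized container through ir.model.fields. A minimal sketch of what that could look like (model and field names are hypothetical, not part of this commit):

    # find a 'serialized' container on the target model, matching the domain
    # used by the serialization_field_id widget in the views above
    fields_obj = self.pool.get('ir.model.fields')
    container_ids = fields_obj.search(cr, uid, [
        ('model', '=', 'res.partner'),
        ('ttype', '=', 'serialized'),
    ])
    fields_obj.create(cr, uid, {
        'model_id': model_id,              # ir.model id of res.partner (assumed known)
        'name': 'x_nickname',
        'field_description': 'Nickname',
        'ttype': 'char',
        'state': 'manual',
        'serialization_field_id': container_ids[0],
    })

Note the write() guard above: once created, such a field can no longer be renamed or moved to another container.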
diff --combined openerp/osv/fields.py
@@@ -45,7 -45,6 +45,7 @@@ import opener
  import openerp.netsvc as netsvc
  import openerp.tools as tools
  from openerp.tools.translate import _
 +import json
  
  def _symbol_set(symb):
      if symb == None or symb == False:
@@@ -1016,10 -1015,10 +1016,10 @@@ class function(_column)
                  result = (value, dict_names[value])
  
          if field_type == 'binary':
-             if context.get('bin_size', False):
+             if context.get('bin_size'):
                  # client requests only the size of binary fields
                  result = get_nice_size(value)
-             else:
+             elif not context.get('bin_raw'):
                  result = sanitize_binary_value(value)
  
          if field_type in ("integer","integer_big") and value > xmlrpclib.MAXINT:
@@@ -1167,106 -1166,20 +1167,108 @@@ class related(function)
      def _field_get2(self, cr, uid, obj, context=None):
          if self._relations:
              return
+         result = []
          obj_name = obj._name
          for i in range(len(self._arg)):
              f = obj.pool.get(obj_name).fields_get(cr, uid, [self._arg[i]], context=context)[self._arg[i]]
-             self._relations.append({
+             result.append({
                  'object': obj_name,
                  'type': f['type']
  
              })
              if f.get('relation',False):
                  obj_name = f['relation']
-                 self._relations[-1]['relation'] = f['relation']
+                 result[-1]['relation'] = f['relation']
+         self._relations = result
  
 +
 +class sparse(function):
 +
 +    def convert_value(self, obj, cr, uid, record, value, read_value, context=None):
 +        """
 +            + For a many2many field, a list of tuples is expected.
 +              Here is the list of tuples that are accepted, with the corresponding semantics ::
 +
 +                 (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
 +                 (1, ID, { values })    update the linked record with id = ID (write *values* on it)
 +                 (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
 +                 (3, ID)                cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
 +                 (4, ID)                link to existing record with id = ID (adds a relationship)
 +                 (5)                    unlink all (like using (3,ID) for all linked records)
 +                 (6, 0, [IDs])          replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
 +
 +                 Example:
 +                    [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
 +
 +            + For a one2many field, a list of tuples is expected.
 +              Here is the list of tuples that are accepted, with the corresponding semantics ::
 +
 +                 (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
 +                 (1, ID, { values })    update the linked record with id = ID (write *values* on it)
 +                 (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
 +
 +                 Example:
 +                    [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
 +        """
 +
 +        if self._type == 'many2many':
 +            assert value[0][0] == 6, 'Unsupported m2m value for sparse field: %s' % value
 +            return value[0][2]
 +
 +        elif self._type == 'one2many':
 +            if not read_value:
 +                read_value = []
 +            relation_obj = obj.pool.get(self.relation)
 +            for vals in value:
 +                assert vals[0] in (0,1,2), 'Unsupported o2m value for sparse field: %s' % vals
 +                if vals[0] == 0:
 +                    read_value.append(relation_obj.create(cr, uid, vals[2], context=context))
 +                elif vals[0] == 1:
 +                    relation_obj.write(cr, uid, vals[1], vals[2], context=context)
 +                elif vals[0] == 2:
 +                    relation_obj.unlink(cr, uid, vals[1], context=context)
 +                    read_value.remove(vals[1])
 +            return read_value
 +        return value
 +
 +
 +    def _fnct_write(self, obj, cr, uid, ids, field_name, value, args, context=None):
 +        if not isinstance(ids, list):
 +            ids = [ids]
 +        records = obj.browse(cr, uid, ids, context=context)
 +        for record in records:
 +            # grab the serialized value as an object - already deserialized
 +            serialized = getattr(record, self.serialization_field)
 +            if value is None:
 +                # simply delete the key to unset it
 +                serialized.pop(field_name, None)
 +            else:
 +                serialized[field_name] = self.convert_value(obj, cr, uid, record, value, serialized.get(field_name), context=context)
 +            # write back on this record only: writing all ids at once would
 +            # overwrite every record's container with this record's values
 +            obj.write(cr, uid, [record.id], {self.serialization_field: serialized}, context=context)
 +        return True
 +
 +    def _fnct_read(self, obj, cr, uid, ids, field_names, args, context=None):
 +        results = {}
 +        records = obj.browse(cr, uid, ids, context=context)
 +        for record in records:
 +            # grab the serialized value as an object - already deserialized
 +            serialized = getattr(record, self.serialization_field)
 +            results[record.id] = {}
 +            for field_name in field_names:
 +                if obj._columns[field_name]._type in ['one2many']:
 +                    value = serialized.get(field_name, [])
 +                else:
 +                    value = serialized.get(field_name, False)
 +                results[record.id][field_name] = value
 +        return results
 +
 +    def __init__(self, serialization_field, **kwargs):
 +        self.serialization_field = serialization_field
 +        super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='__sparse_multi', method=True, **kwargs)
 +
 +
  # ---------------------------------------------------------
  # Dummy fields
  # ---------------------------------------------------------
@@@ -1289,26 -1202,14 +1291,26 @@@ class dummy(function)
  # ---------------------------------------------------------
  # Serialized fields
  # ---------------------------------------------------------
 +
  class serialized(_column):
 -    def __init__(self, string='unknown', serialize_func=repr, deserialize_func=eval, type='text', **args):
 -        self._serialize_func = serialize_func
 -        self._deserialize_func = deserialize_func
 -        self._type = type
 -        self._symbol_set = (self._symbol_c, self._serialize_func)
 -        self._symbol_get = self._deserialize_func
 -        super(serialized, self).__init__(string=string, **args)
 +    """ A field able to store an arbitrary python data structure.
 +
 +        Note: only JSON-serializable content is allowed, as the value is
 +        stored with json.dumps() and read back with json.loads().
 +    """
 +
 +    # no `self` here: this function is called through the _symbol_set tuple,
 +    # where it is a plain function rather than a bound method
 +    def _symbol_set_struct(val):
 +        return json.dumps(val)
 +
 +    def _symbol_get_struct(self, val):
 +        return json.loads(val or '{}')
 +
 +    _prefetch = False
 +    _type = 'serialized'
 +
 +    _symbol_c = '%s'
 +    _symbol_f = _symbol_set_struct
 +    _symbol_set = (_symbol_c, _symbol_f)
 +    _symbol_get = _symbol_get_struct
  
  # TODO: review this class completely for speed improvements
  class property(function):
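
Taken together, the new serialized and sparse types are designed to be paired in a model definition: the serialized field provides a single JSON text column, and each sparse field stores its value under its own key inside that column. A minimal sketch of the intended usage (model and field names are illustrative, not from this commit):

    from openerp.osv import fields, osv

    class res_partner(osv.osv):
        _inherit = 'res.partner'
        _columns = {
            # the container: one text column holding a JSON dict
            'x_extra': fields.serialized('Extra Data'),
            # sparse fields live inside the container instead of
            # getting database columns of their own
            'x_twitter': fields.sparse('x_extra', type='char', size=64,
                                       string='Twitter Handle'),
            'x_tag_ids': fields.sparse('x_extra', type='many2many',
                                       relation='res.partner.category',
                                       string='Extra Tags'),
        }

Reads and writes then go through the regular ORM API; per convert_value() above, a many2many sparse field only accepts the (6, 0, [IDs]) command:

    # partner_obj = self.pool.get('res.partner')
    partner_obj.write(cr, uid, [partner_id], {
        'x_twitter': '@example',
        'x_tag_ids': [(6, 0, [1, 2])],
    })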
diff --combined openerp/osv/orm.py
@@@ -51,7 -51,6 +51,6 @@@ import pickl
  import re
  import simplejson
  import time
- import traceback
  import types
  import warnings
  from lxml import etree
@@@ -336,8 -335,8 +335,8 @@@ class browse_record(object)
          cache.setdefault(table._name, {})
          self._data = cache[table._name]
  
-         if not (id and isinstance(id, (int, long,))):
-             raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
+ #        if not (id and isinstance(id, (int, long,))):
+ #            raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
  #        if not table.exists(cr, uid, id, context):
  #            raise BrowseRecordError(_('Object %s does not exists') % (self,))
  
@@@ -552,7 -551,6 +551,7 @@@ FIELDS_TO_PGTYPES = 
      fields.datetime: 'timestamp',
      fields.binary: 'bytea',
      fields.many2one: 'int4',
 +    fields.serialized: 'text',
  }
  
  def get_pg_type(f, type_override=None):
@@@ -755,11 -753,7 +754,11 @@@ class BaseModel(object)
          for rec in cr.dictfetchall():
              cols[rec['name']] = rec
  
 -        for (k, f) in self._columns.items():
 +        ir_model_fields_obj = self.pool.get('ir.model.fields')
 +
 +        # sparse fields should be created at the end, as they depend on their serialized field already existing;
 +        # note: don't name this list `fields`, which would shadow the fields module used below
 +        sorted_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
 +        for (k, f) in sorted_fields:
              vals = {
                  'model_id': model_id,
                  'model': self._name,
                  'selectable': (f.selectable and 1) or 0,
                  'translate': (f.translate and 1) or 0,
                  'relation_field': (f._type=='one2many' and isinstance(f, fields.one2many)) and f._fields_id or '',
 +                'serialization_field_id': None,
              }
 +            if getattr(f, 'serialization_field', None):
 +                # resolve link to serialization_field if specified by name
 +                serialization_field_id = ir_model_fields_obj.search(cr, 1, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
 +                if not serialization_field_id:
 +                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
 +                vals['serialization_field_id'] = serialization_field_id[0]
 +
              # When it's a custom field, it does not contain f.select
              if context.get('field_state', 'base') == 'manual':
                  if context.get('field_name', '') == k:
                  vals['id'] = id
                  cr.execute("""INSERT INTO ir_model_fields (
                      id, model_id, model, name, field_description, ttype,
 -                    relation,view_load,state,select_level,relation_field, translate
 +                    relation,view_load,state,select_level,relation_field, translate, serialization_field_id
                  ) VALUES (
 -                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
 +                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                  )""", (
                      id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                       vals['relation'], bool(vals['view_load']), 'base',
 -                    vals['select_level'], vals['relation_field'], bool(vals['translate'])
 +                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                  ))
                  if 'module' in context:
                      name1 = 'field_' + self._table + '_' + k
                          cr.commit()
                          cr.execute("""UPDATE ir_model_fields SET
                              model_id=%s, field_description=%s, ttype=%s, relation=%s,
 -                            view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s
 +                            view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                          WHERE
                              model=%s AND name=%s""", (
                                  vals['model_id'], vals['field_description'], vals['ttype'],
                                  vals['relation'], bool(vals['view_load']),
 -                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['model'], vals['name']
 +                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                              ))
                          break
          cr.commit()
                      #'select': int(field['select_level'])
                  }
  
 +                if field['serialization_field_id']:
 +                    cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
 +                    attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
 +                    if field['ttype'] in ['many2one', 'one2many', 'many2many']:
 +                        attrs.update({'relation': field['relation']})
 +                    self._columns[field['name']] = fields.sparse(**attrs)
                 elif field['ttype'] == 'selection':
                      self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
                  elif field['ttype'] == 'reference':
          if not context:
              context = {}
          fields = map(fix_import_export_id_paths, fields)
-         logger = netsvc.Logger()
          ir_model_data_obj = self.pool.get('ir.model.data')
  
          # mode: id (XML id) or .id (database id) or False for name_get
                  # ID of the record using a XML ID
                  if field_name == 'id':
                      try:
-                         data_res_id = _get_id(model_name, line[i], current_module, 'id')
+                         data_res_id = _get_id(model_name, line[i], current_module)
                      except ValueError:
                          pass
                      xml_id = line[i]
  
                  row[field_name] = res or False
  
-             result = (row, nbrmax, warning, data_res_id, xml_id)
-             return result
+             return row, nbrmax, warning, data_res_id, xml_id
  
          fields_def = self.fields_get(cr, uid, context=context)
  
-         if config.get('import_partial', False) and filename:
-             data = pickle.load(file(config.get('import_partial')))
          position = 0
-         while position<len(datas):
-             res = {}
+         if config.get('import_partial') and filename:
+             with open(config.get('import_partial'), 'rb') as partial_import_file:
+                 data = pickle.load(partial_import_file)
+                 position = data.get(filename, 0)
  
+         while position<len(datas):
              (res, position, warning, res_id, xml_id) = \
                      process_liness(self, datas, [], current_module, self._name, fields_def, position=position)
              if len(warning):
                  cr.rollback()
-                 return (-1, res, 'Line ' + str(position) +' : ' + '!\n'.join(warning), '')
+                 return -1, res, 'Line ' + str(position) +' : ' + '!\n'.join(warning), ''
  
              try:
                  ir_model_data_obj._update(cr, uid, self._name,
                       current_module, res, mode=mode, xml_id=xml_id,
                       noupdate=noupdate, res_id=res_id, context=context)
              except Exception, e:
-                 return (-1, res, 'Line ' + str(position) + ' : ' + tools.ustr(e), '')
+                 return -1, res, 'Line ' + str(position) + ' : ' + tools.ustr(e), ''
  
-             if config.get('import_partial', False) and filename and (not (position%100)):
-                 data = pickle.load(file(config.get('import_partial')))
+             if config.get('import_partial') and filename and (not (position%100)):
+                 with open(config.get('import_partial'), 'rb') as partial_import:
+                     data = pickle.load(partial_import)
                  data[filename] = position
-                 pickle.dump(data, file(config.get('import_partial'), 'wb'))
+                 with open(config.get('import_partial'), 'wb') as partial_import:
+                     pickle.dump(data, partial_import)
                  if context.get('defer_parent_store_computation'):
                      self._parent_store_compute(cr)
                  cr.commit()
  
          if context.get('defer_parent_store_computation'):
              self._parent_store_compute(cr)
-         return (position, 0, 0, 0)
+         return position, 0, 0, 0
  
      def get_invalid_fields(self, cr, uid):
          return list(self._invalids)
          except AttributeError:
              pass
  
+     def _read_group_fill_results(self, cr, uid, domain, groupby, groupby_list, aggregated_fields,
+                                  read_group_result, read_group_order=None, context=None):
+         """Helper method for filling in empty groups for all possible values of
+            the field being grouped by"""
+         # self._group_by_full should map groupable fields to a method that returns
+         # a list of all aggregated values that we want to display for this field,
+         # in the form of a m2o-like pair (key,label).
+         # This is useful to implement kanban views for instance, where all columns
+         # should be displayed even if they don't contain any record.
+         # Grab the list of all groups that should be displayed, including all present groups 
+         present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
+         all_groups = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
+                                                   read_group_order=read_group_order,
+                                                   access_rights_uid=openerp.SUPERUSER_ID,
+                                                   context=context)
+         result_template = dict.fromkeys(aggregated_fields, False)
+         result_template.update({groupby + '_count':0})
+         if groupby_list and len(groupby_list) > 1:
+             result_template.update(__context={'group_by': groupby_list[1:]})
+         # Merge the left_side (current results as dicts) with the right_side (all
+         # possible values as m2o pairs). Both lists are supposed to be using the
+         # same ordering, and can be merged in one pass.
+         result = []
+         known_values = {}
+         def append_left(left_side):
+             grouped_value = left_side[groupby] and left_side[groupby][0]
+             if grouped_value not in known_values:
+                 result.append(left_side)
+                 known_values[grouped_value] = left_side
+             else:
+                 count_attr = groupby + '_count'
+                 known_values[grouped_value].update({count_attr: left_side[count_attr]})
+         def append_right(right_side):
+             grouped_value = right_side[0]
+             if grouped_value not in known_values:
+                 line = dict(result_template)
+                 line.update({
+                     groupby: right_side,
+                     '__domain': [(groupby,'=',grouped_value)] + domain,
+                 })
+                 result.append(line)
+                 known_values[grouped_value] = line
+         while read_group_result or all_groups:
+             left_side = read_group_result[0] if read_group_result else None
+             right_side = all_groups[0] if all_groups else None
+             assert left_side is None or left_side[groupby] is False \
+                  or isinstance(left_side[groupby], (tuple,list)), \
+                 'M2O-like pair expected, got %r' % left_side[groupby]
+             assert right_side is None or isinstance(right_side, (tuple,list)), \
+                 'M2O-like pair expected, got %r' % right_side
+             if left_side is None:
+                 append_right(all_groups.pop(0))
+             elif right_side is None:
+                 append_left(read_group_result.pop(0))
+             elif left_side[groupby] == right_side:
+                 append_left(read_group_result.pop(0))
+                 all_groups.pop(0) # discard right_side
+             elif not left_side[groupby] or not left_side[groupby][0]:
+                 # left side == "Undefined" entry, not present on right_side
+                 append_left(read_group_result.pop(0))
+             else:
+                 append_right(all_groups.pop(0))
+         return result
+
      def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
          """
          Get the list of records in list view grouped by the given ``groupby`` fields
  
          # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
          fget = self.fields_get(cr, uid, fields)
-         float_int_fields = filter(lambda x: fget[x]['type'] in ('float', 'integer'), fields)
          flist = ''
          group_count = group_by = groupby
          if groupby:
              if fget.get(groupby):
-                 if fget[groupby]['type'] in ('date', 'datetime'):
-                     flist = "to_char(%s,'yyyy-mm') as %s " % (qualified_groupby_field, groupby)
-                     groupby = "to_char(%s,'yyyy-mm')" % (qualified_groupby_field)
-                     qualified_groupby_field = groupby
+                 groupby_type = fget[groupby]['type']
+                 if groupby_type in ('date', 'datetime'):
+                     qualified_groupby_field = "to_char(%s,'yyyy-mm')" % qualified_groupby_field
+                     flist = "%s as %s " % (qualified_groupby_field, groupby)
+                 elif groupby_type == 'boolean':
+                     qualified_groupby_field = "coalesce(%s,false)" % qualified_groupby_field
+                     flist = "%s as %s " % (qualified_groupby_field, groupby)
                  else:
                      flist = qualified_groupby_field
              else:
                  raise except_orm(_('Invalid group_by'),
                                   _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(groupby,))
  
-         fields_pre = [f for f in float_int_fields if
-                    f == self.CONCURRENCY_CHECK_FIELD
-                 or (f in self._columns and getattr(self._columns[f], '_classic_write'))]
-         for f in fields_pre:
-             if f not in ['id', 'sequence']:
-                 group_operator = fget[f].get('group_operator', 'sum')
-                 if flist:
-                     flist += ', '
-                 qualified_field = '"%s"."%s"' % (self._table, f)
-                 flist += "%s(%s) AS %s" % (group_operator, qualified_field, f)
+         aggregated_fields = [
+             f for f in fields
+             if f not in ('id', 'sequence')
+             if fget[f]['type'] in ('integer', 'float')
+             if (f in self._columns and getattr(self._columns[f], '_classic_write'))]
+         for f in aggregated_fields:
+             group_operator = fget[f].get('group_operator', 'sum')
+             if flist:
+                 flist += ', '
+             qualified_field = '"%s"."%s"' % (self._table, f)
+             flist += "%s(%s) AS %s" % (group_operator, qualified_field, f)
  
          gb = groupby and (' GROUP BY ' + qualified_groupby_field) or ''
  
              alldata[r['id']] = r
              del r['id']
  
-         data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=orderby or groupby, context=context)
+         order = orderby or groupby
+         data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=order, context=context)
          # the IDS of records that have groupby field value = False or '' should be sorted too
          data_ids += filter(lambda x:x not in data_ids, alldata.keys())
          data = self.read(cr, uid, data_ids, groupby and [groupby] or ['id'], context=context)
              del d['id']
  
          if groupby and groupby in self._group_by_full:
-             gids = [x[groupby][0] for x in data if x[groupby]]
-             stages = self._group_by_full[groupby](self, cr, uid, gids, domain, context)
-             # as both lists are sorted in the same way, we can merge in one pass
-             pos = 0
-             while stages and ((pos<len(data)) or (pos<len(stages))):
-                 if (pos<len(data)) and (not data[pos][groupby] or (data[pos][groupby][0] == stages[pos][0])):
-                     pos+=1
-                     continue
-                 val = dict.fromkeys(float_int_fields, False)
-                 val.update({
-                     groupby: stages[pos],
-                     '__domain': [(groupby, '=', stages[pos][0])]+domain,
-                     groupby+'_count': 0L,
-                     '__context': {'group_by': groupby_list[1:]}
-                 })
-                 data.insert(pos, val)
+             data = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
+                                                  aggregated_fields, data, read_group_order=order,
+                                                  context=context)
          return data
  
      def _inherits_join_add(self, current_table, parent_model_name, query):
          upd_todo = []
          unknown_fields = []
          for v in vals.keys():
-             if v in self._inherit_fields:
+             if v in self._inherit_fields and v not in self._columns:
                  (table, col, col_detail, original_parent) = self._inherit_fields[v]
                  tocreate[table][v] = vals[v]
                  del vals[v]
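
The _group_by_full hook consumed by _read_group_fill_results maps a groupable field to a method returning all (id, name) pairs to display, in the same order as the read_group results. A minimal sketch of a model using it to keep empty kanban columns visible (model and field names are illustrative, not from this commit):

    def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None,
                              access_rights_uid=None, context=None):
        # return every stage as an m2o-like (id, name) pair so read_group
        # yields a group for each stage, even the ones holding no records
        stage_obj = self.pool.get('project.task.type')
        stage_ids = stage_obj.search(cr, access_rights_uid or uid, [], context=context)
        return stage_obj.name_get(cr, access_rights_uid or uid, stage_ids, context=context)

    _group_by_full = {
        'stage_id': _read_group_stage_ids,
    }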
diff --combined setup.py
index 3f10868,a239b6f..10c98d7
mode 100644,100644..100755
+++ b/setup.py
++#!/usr/bin/env python
++# -*- coding: utf-8 -*-
++##############################################################################
++#
++#    OpenERP, Open Source Management Solution
++#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
++#
++#    This program is free software: you can redistribute it and/or modify
++#    it under the terms of the GNU Affero General Public License as
++#    published by the Free Software Foundation, either version 3 of the
++#    License, or (at your option) any later version.
++#
++#    This program is distributed in the hope that it will be useful,
++#    but WITHOUT ANY WARRANTY; without even the implied warranty of
++#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++#    GNU Affero General Public License for more details.
++#
++#    You should have received a copy of the GNU Affero General Public License
++#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
++#
++##############################################################################
  
--import os
--import re
--import sys
--from setuptools import setup, find_packages
++import glob, os, re, setuptools, sys
++from os.path import join, isfile
  
--execfile('addons/web/common/release.py')
++# List all data files
++def data():
++    files = []
++    for root, dirnames, filenames in os.walk('openerp'):
++        for filename in filenames:
++            if not re.match(r'.*(\.pyc|\.pyo|\~)$',filename):
++                files.append(os.path.join(root, filename))
++    d = {}
++    for v in files:
++        k=os.path.dirname(v)
++        if k in d:
++            d[k].append(v)
++        else:
++            d[k]=[v]
++    r = d.items()
++    if os.name == 'nt':
++        r.append(("Microsoft.VC90.CRT", glob.glob(r'C:\Microsoft.VC90.CRT\*.*')))
++    return r
  
--version_dash_incompatible = False
--if 'bdist_rpm' in sys.argv:
--    version_dash_incompatible = True
--try:
--    import py2exe
--    from py2exe_utils import opts
--    version_dash_incompatible = True
--except ImportError:
--    opts = {}
--if version_dash_incompatible:
--    version = version.split('-')[0]
++def gen_manifest():
++    # data() returns (directory, [files]) pairs; flatten to one file per line
++    file_list = "\n".join(f for _, file_names in data() for f in file_names)
++    with open('MANIFEST', 'w') as manifest:
++        manifest.write(file_list)
  
--FILE_PATTERNS = \
--    r'.+\.(py|cfg|po|pot|mo|txt|rst|gif|png|jpg|ico|mako|html|js|css|htc|swf)$'
--def find_data_files(source, patterns=FILE_PATTERNS):
--    file_matcher = re.compile(patterns, re.I)
--    out = []
--    for base, _, files in os.walk(source):
--        cur_files = []
--        for f in files:
--            if file_matcher.match(f):
--                cur_files.append(os.path.join(base, f))
--        if cur_files:
--            out.append(
--                (base, cur_files))
++if os.name == 'nt':
++    sys.path.append(r"C:\Microsoft.VC90.CRT")
  
--    return out
++def py2exe_options():
++    if os.name == 'nt':
++        import py2exe
++        return {
++            "console" : [ { "script": "openerp-server", "icon_resources": [(1, join("install","openerp-icon.ico"))], }],
++            'options' : {
++                "py2exe": {
++                    "skip_archive": 1,
++                    "optimize": 2,
++                    "dist_dir": 'dist',
++                    "packages": [ "DAV", "HTMLParser", "PIL", "asynchat", "asyncore", "commands", "dateutil", "decimal", "email", "encodings", "imaplib", "lxml", "lxml._elementpath", "lxml.builder", "lxml.etree", "lxml.objectify", "mako", "openerp", "poplib", "pychart", "pydot", "pyparsing", "reportlab", "select", "simplejson", "smtplib", "uuid", "vatnumber", "vobject", "xml", "xml.dom", "yaml", ],
++                    "excludes" : ["Tkconstants","Tkinter","tcl"],
++                }
++            }
++        }
++    else:
++        return {}
  
--setup(
--    name=name,
--    version=version,
--    description=description,
--    long_description=long_description,
--    author=author,
--    author_email=author_email,
--    url=url,
--    download_url=download_url,
--    license=license,
--    install_requires=[
--        "Babel >= 0.9.6",
--        "simplejson >= 2.0.9",
-         "python-dateutil >= 1.4.1",
-         "pytz",
-         "werkzeug == 0.7",
-     ],
-     tests_require=[
-         'unittest2',
-         'mock',
-     ],
-     test_suite = 'unittest2.collector',
-     zip_safe=False,
-     packages=find_packages(),
-     classifiers=[
-         'Development Status :: 6 - Production/Stable',
-         'Operating System :: OS Independent',
-         'Programming Language :: Python',
-         'Environment :: Web Environment',
-         'Topic :: Office/Business :: Financial',
-         ],
-     scripts=['openerp-web'],
-     data_files=(find_data_files('addons')
-               + opts.pop('data_files', [])
-     ),
-     **opts
 -        "python-dateutil >= 1.4.1, < 2",
 -        "pytz",
 -        "werkzeug == 0.7",
 -    ],
 -    tests_require=[
 -        'unittest2',
 -        'mock',
 -    ],
 -    test_suite = 'unittest2.collector',
 -    zip_safe=False,
 -    packages=find_packages(),
 -    classifiers=[
 -        'Development Status :: 6 - Production/Stable',
 -        'Operating System :: OS Independent',
 -        'Programming Language :: Python',
 -        'Environment :: Web Environment',
 -        'Topic :: Office/Business :: Financial',
 -        ],
 -    scripts=['openerp-web'],
 -    data_files=(find_data_files('addons')
 -              + opts.pop('data_files', [])
 -    ),
 -    **opts
++execfile(join(os.path.dirname(__file__), 'openerp', 'release.py'))
++
++setuptools.setup(
++      name             = 'openerp',
++      version          = version,
++      description      = description,
++      long_description = long_desc,
++      url              = url,
++      author           = author,
++      author_email     = author_email,
++      classifiers      = filter(None, classifiers.split("\n")),
++      license          = license,
++      scripts          = ['openerp-server'],
++      data_files       = data(),
++      packages         = setuptools.find_packages(),
++      #include_package_data = True,
++      install_requires = [
++        # TODO the pychart package we include in openerp corresponds to PyChart 1.37.
++        # It seems there is a single difference, which is a spurious print in generate_docs.py.
++        # It is probably safe to move to PyChart 1.39 (the latest one).
++        # (Let setup.py choose the latest one, and we should check we can remove pychart from
++        # our tree.) http://download.gna.org/pychart/
++        # TODO  'pychart',
++          'babel',
++          'feedparser',
++          'gdata',
++          'lxml',
++          'mako',
++          'psycopg2',
++          'pydot',
++          'python-dateutil < 2',
++          'python-ldap',
++          'python-openid',
++          'pytz',
++          'pywebdav',
++          'pyyaml',
++          'reportlab',
++          'simplejson',
++          'vatnumber',
++          'vobject',
++          'werkzeug',
++          'zsi',
++      ],
++      extras_require = {
++          'SSL' : ['pyopenssl'],
++      },
++      **py2exe_options()
  )
++
++
++# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: