[MERGE] Forward-port saas-3 up to 09ab2d4
author Olivier Dony <odo@openerp.com>
Wed, 17 Sep 2014 13:15:22 +0000 (15:15 +0200)
committer Olivier Dony <odo@openerp.com>
Wed, 17 Sep 2014 13:16:30 +0000 (15:16 +0200)
addons/point_of_sale/point_of_sale.py
openerp/modules/loading.py
openerp/osv/orm.py

@@@ -61,15 -61,15 +61,15 @@@ class pos_config(osv.osv)
          'journal_ids' : fields.many2many('account.journal', 'pos_config_journal_rel', 
               'pos_config_id', 'journal_id', 'Available Payment Methods',
               domain="[('journal_user', '=', True ), ('type', 'in', ['bank', 'cash'])]",),
 -        'warehouse_id' : fields.many2one('stock.warehouse', 'Warehouse',
 -             required=True),
 +        'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
 +        'stock_location_id': fields.many2one('stock.location', 'Stock Location', domain=[('usage', '=', 'internal')], required=True),
          'journal_id' : fields.many2one('account.journal', 'Sale Journal',
               domain=[('type', '=', 'sale')],
               help="Accounting journal used to post sales entries."),
          'currency_id' : fields.function(_get_currency, type="many2one", string="Currency", relation="res.currency"),
          'iface_self_checkout' : fields.boolean('Self Checkout Mode',
               help="Check this if this point of sale should open by default in a self checkout mode. If unchecked, OpenERP uses the normal cashier mode by default."),
 -        'iface_cashdrawer' : fields.boolean('Cashdrawer',help="Automatically open the cashdrawer"),
 +        'iface_cashdrawer' : fields.boolean('Cashdrawer', help="Automatically open the cashdrawer"),
          'iface_payment_terminal' : fields.boolean('Payment Terminal', help="Enables Payment Terminal integration"),
          'iface_electronic_scale' : fields.boolean('Electronic Scale', help="Enables Electronic Scale integration"),
          'iface_vkeyboard' : fields.boolean('Virtual KeyBoard', help="Enables an integrated Virtual Keyboard"),
@@@ -87,8 -87,7 +87,8 @@@
                  "to customize the reference numbers of your orders."),
          'session_ids': fields.one2many('pos.session', 'config_id', 'Sessions'),
          'group_by' : fields.boolean('Group Journal Items', help="Check this if you want to group the Journal Items by Product while closing a Session"),
 -        'pricelist_id': fields.many2one('product.pricelist','Pricelist', required=True)
 +        'pricelist_id': fields.many2one('product.pricelist','Pricelist', required=True),
 +        'company_id': fields.many2one('res.company', 'Company', required=True), 
      }
  
      def _check_cash_control(self, cr, uid, ids, context=None):
          res = self.pool.get('account.journal').search(cr, uid, [('type', '=', 'sale'), ('company_id', '=', company_id)], limit=1, context=context)
          return res and res[0] or False
  
 -    def _default_warehouse(self, cr, uid, context=None):
 -        user = self.pool.get('res.users').browse(cr, uid, uid, context)
 -        res = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
 -        return res and res[0] or False
 -
      def _default_pricelist(self, cr, uid, context=None):
          res = self.pool.get('product.pricelist').search(cr, uid, [('type', '=', 'sale')], limit=1, context=context)
          return res and res[0] or False
  
 +    def _get_default_location(self, cr, uid, context=None):
 +        wh_obj = self.pool.get('stock.warehouse')
 +        user = self.pool.get('res.users').browse(cr, uid, uid, context)
 +        res = wh_obj.search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
 +        if res and res[0]:
 +            return wh_obj.browse(cr, uid, res[0], context=context).lot_stock_id.id
 +        return False
 +
 +    def _get_default_company(self, cr, uid, context=None):
 +        company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
 +        return company_id
 +
      _defaults = {
          'state' : POS_CONFIG_STATE[0][0],
 -        'warehouse_id': _default_warehouse,
          'journal_id': _default_sale_journal,
          'group_by' : True,
          'pricelist_id': _default_pricelist,
          'iface_invoicing': True,
 +        'stock_location_id': _get_default_location,
 +        'company_id': _get_default_company,
      }
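
The _defaults mapping above mixes literal values with method references; the callables are invoked with (self, cr, uid, context), as the lambda on user_id further down shows. A minimal stand-in for the resolution pattern, outside the Odoo registry (values are hypothetical):

# Stand-in only: real _defaults callables receive (self, cr, uid, context).
defaults = {'group_by': True, 'company_id': lambda: 1}
record = dict((k, v() if callable(v) else v) for k, v in defaults.items())
assert record == {'group_by': True, 'company_id': 1}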
  
 +    def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
 +        p_type_obj = self.pool.get("stock.picking.type")
 +        p_type = p_type_obj.browse(cr, uid, picking_type_id, context=context)
 +        if p_type.default_location_src_id and p_type.default_location_src_id.usage == 'internal' and p_type.default_location_dest_id and p_type.default_location_dest_id.usage == 'customer':
 +            return {'value': {'stock_location_id': p_type.default_location_src_id.id}}
 +        return False
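
The new onchange follows the usual osv contract: the client sends the chosen picking_type_id and merges the returned {'value': ...} mapping into the form. A runnable sketch of that contract with stand-in objects (FakeType and FakeLocation are illustrative, not Odoo API):

class FakeLocation(object):
    def __init__(self, id, usage):
        self.id, self.usage = id, usage

class FakeType(object):
    def __init__(self, src, dest):
        self.default_location_src_id = src
        self.default_location_dest_id = dest

def onchange_picking_type_id(p_type):
    # Mirror of the condition above: internal source, customer destination.
    if (p_type.default_location_src_id
            and p_type.default_location_src_id.usage == 'internal'
            and p_type.default_location_dest_id
            and p_type.default_location_dest_id.usage == 'customer'):
        return {'value': {'stock_location_id': p_type.default_location_src_id.id}}
    return False

pos_type = FakeType(FakeLocation(12, 'internal'), FakeLocation(9, 'customer'))
assert onchange_picking_type_id(pos_type) == {'value': {'stock_location_id': 12}}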
 +
      def set_active(self, cr, uid, ids, context=None):
          return self.write(cr, uid, ids, {'state' : 'active'}, context=context)
  
@@@ -350,7 -334,7 +350,7 @@@ class pos_session(osv.osv)
          # the .xml files as the CoA is not yet installed.
          jobj = self.pool.get('pos.config')
          pos_config = jobj.browse(cr, uid, config_id, context=context)
 -        context.update({'company_id': pos_config.warehouse_id.company_id.id})
 +        context.update({'company_id': pos_config.company_id.id})
          if not pos_config.journal_id:
              jid = jobj.default_get(cr, uid, ['journal_id'], context=context)['journal_id']
              if jid:
                  if not cashids:
                      cashids = journal_proxy.search(cr, uid, [('journal_user','=',True)], context=context)
  
+             journal_proxy.write(cr, uid, cashids, {'journal_user': True})
              jobj.write(cr, uid, [pos_config.id], {'journal_ids': [(6,0, cashids)]})
  
  
              bank_values = {
                  'journal_id' : journal.id,
                  'user_id' : uid,
 -                'company_id' : pos_config.warehouse_id.company_id.id
 +                'company_id' : pos_config.company_id.id
              }
              statement_id = self.pool.get('account.bank.statement').create(cr, uid, bank_values, context=context)
              bank_statement_ids.append(statement_id)
                  if st.difference and st.journal_id.cash_control == True:
                      if st.difference > 0.0:
                          name= _('Point of Sale Profit')
 -                        account_id = st.journal_id.profit_account_id.id
                      else:
 -                        account_id = st.journal_id.loss_account_id.id
                          name= _('Point of Sale Loss')
 -                    if not account_id:
 -                        raise osv.except_osv( _('Error!'),
 -                        _("Please set your profit and loss accounts on your payment method '%s'. This will allow OpenERP to post the difference of %.2f in your ending balance. To close this session, you can update the 'Closing Cash Control' to avoid any difference.") % (st.journal_id.name,st.difference))
                      bsl.create(cr, uid, {
                          'statement_id': st.id,
                          'amount': st.difference,
                          'ref': record.name,
                          'name': name,
 -                        'account_id': account_id
                      }, context=context)
  
                  if st.journal_id.type == 'bank':
@@@ -523,7 -514,8 +524,7 @@@ class pos_order(osv.osv)
      _description = "Point of Sale"
      _order = "id desc"
  
 -    def create_from_ui(self, cr, uid, orders, context=None):      
 -        
 +    def create_from_ui(self, cr, uid, orders, context=None):
          # Keep only new orders
          submitted_references = [o['data']['name'] for o in orders]
          existing_order_ids = self.search(cr, uid, [('pos_reference', 'in', submitted_references)], context=context)
          return {'value': {'pricelist_id': pricelist}}
  
      def _amount_all(self, cr, uid, ids, name, args, context=None):
 -        tax_obj = self.pool.get('account.tax')
          cur_obj = self.pool.get('res.currency')
          res = {}
          for order in self.browse(cr, uid, ids, context=context):
      _columns = {
          'name': fields.char('Order Ref', size=64, required=True, readonly=True),
          'company_id':fields.many2one('res.company', 'Company', required=True, readonly=True),
 -        'warehouse_id': fields.related('session_id', 'config_id', 'warehouse_id', relation='stock.warehouse', type='many2one', string='Warehouse', store=True, readonly=True),
          'date_order': fields.datetime('Order Date', readonly=True, select=True),
         'user_id': fields.many2one('res.users', 'Salesman', help="Person who uses the cash register. It can be a reliever, a student or an interim employee."),
          'amount_tax': fields.function(_amount_all, string='Taxes', digits_compute=dp.get_precision('Account'), multi='all'),
          'invoice_id': fields.many2one('account.invoice', 'Invoice'),
          'account_move': fields.many2one('account.move', 'Journal Entry', readonly=True),
          'picking_id': fields.many2one('stock.picking', 'Picking', readonly=True),
 +        'picking_type_id': fields.related('session_id', 'config_id', 'picking_type_id', string="Picking Type", type='many2one', relation='stock.picking.type'),
 +        'location_id': fields.related('session_id', 'config_id', 'stock_location_id', string="Location", type='many2one', store=True, relation='stock.location'),
          'note': fields.text('Internal Notes'),
          'nb_print': fields.integer('Number of Print', readonly=True),
          'pos_reference': fields.char('Receipt Ref', size=64, readonly=True),
              return session_record.config_id.pricelist_id and session_record.config_id.pricelist_id.id or False
          return False
  
 +    def _get_out_picking_type(self, cr, uid, context=None):
 +        return self.pool.get('ir.model.data').xmlid_to_res_id(
 +                    cr, uid, 'point_of_sale.picking_type_posout', context=context)
 +
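xmlid_to_res_id resolves a fully qualified external id ('module.name') through ir.model.data to a database id. A conceptual sketch with a plain dict standing in for the ir_model_data table (the res_id value is made up):

# Conceptual sketch only: the real method queries ir_model_data in SQL.
def xmlid_to_res_id(model_data, xmlid):
    module, _, name = xmlid.partition('.')
    return model_data.get((module, name))  # None when the record is absent

model_data = {('point_of_sale', 'picking_type_posout'): 7}
assert xmlid_to_res_id(model_data, 'point_of_sale.picking_type_posout') == 7
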
      _defaults = {
          'user_id': lambda self, cr, uid, context: uid,
          'state': 'draft',
  
      def create_picking(self, cr, uid, ids, context=None):
          """Create a picking for each order and validate it."""
 -        picking_obj = self.pool.get('stock.picking.out')
 +        picking_obj = self.pool.get('stock.picking')
          partner_obj = self.pool.get('res.partner')
          move_obj = self.pool.get('stock.move')
  
          for order in self.browse(cr, uid, ids, context=context):
              addr = order.partner_id and partner_obj.address_get(cr, uid, [order.partner_id.id], ['delivery']) or {}
 -            picking_id = picking_obj.create(cr, uid, {
 -                'origin': order.name,
 -                'partner_id': addr.get('delivery',False),
 -                'type': 'out',
 -                'company_id': order.company_id.id,
 -                'move_type': 'direct',
 -                'note': order.note or "",
 -                'invoice_state': 'none',
 -                'auto_picking': True,
 -            }, context=context)
 -            self.write(cr, uid, [order.id], {'picking_id': picking_id}, context=context)
 -            location_id = order.warehouse_id.lot_stock_id.id
 +            picking_type = order.picking_type_id
 +            picking_id = False
 +            if picking_type:
 +                picking_id = picking_obj.create(cr, uid, {
 +                    'origin': order.name,
 +                    'partner_id': addr.get('delivery',False),
 +                    'picking_type_id': picking_type.id,
 +                    'company_id': order.company_id.id,
 +                    'move_type': 'direct',
 +                    'note': order.note or "",
 +                    'invoice_state': 'none',
 +                }, context=context)
 +                self.write(cr, uid, [order.id], {'picking_id': picking_id}, context=context)
 +            location_id = order.location_id.id
              if order.partner_id:
                  destination_id = order.partner_id.property_stock_customer.id
 +            elif picking_type:
 +                if not picking_type.default_location_dest_id:
 +                    raise osv.except_osv(_('Error!'), _('Missing source or destination location for picking type %s. Please configure those fields and try again.') % (picking_type.name,))
 +                destination_id = picking_type.default_location_dest_id.id
              else:
                  destination_id = partner_obj.default_get(cr, uid, ['property_stock_customer'], context=context)['property_stock_customer']
  
 +            move_list = []
              for line in order.lines:
                  if line.product_id and line.product_id.type == 'service':
                      continue
  
 -                move_obj.create(cr, uid, {
 +                move_list.append(move_obj.create(cr, uid, {
                      'name': line.name,
                      'product_uom': line.product_id.uom_id.id,
                      'product_uos': line.product_id.uom_id.id,
                      'picking_id': picking_id,
 +                    'picking_type_id': picking_type.id, 
                      'product_id': line.product_id.id,
                      'product_uos_qty': abs(line.qty),
 -                    'product_qty': abs(line.qty),
 -                    'tracking_id': False,
 +                    'product_uom_qty': abs(line.qty),
                      'state': 'draft',
                      'location_id': location_id if line.qty >= 0 else destination_id,
                      'location_dest_id': destination_id if line.qty >= 0 else location_id,
 -                }, context=context)
 -            
 -            picking_obj.signal_button_confirm(cr, uid, [picking_id])
 -            picking_obj.force_assign(cr, uid, [picking_id], context)
 +                }, context=context))
 +                
 +            if picking_id:
 +                picking_obj.action_confirm(cr, uid, [picking_id], context=context)
 +                picking_obj.force_assign(cr, uid, [picking_id], context=context)
 +                picking_obj.action_done(cr, uid, [picking_id], context=context)
 +            elif move_list:
 +                move_obj.action_confirm(cr, uid, move_list, context=context)
 +                move_obj.force_assign(cr, uid, move_list, context=context)
 +                move_obj.action_done(cr, uid, move_list, context=context)
          return True
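
When a picking exists, the order is pushed through confirm -> force_assign -> done in one go; without a picking type, the bare moves get the same chain. A toy state machine illustrating the sequencing only (not the stock.picking workflow itself):

class ToyPicking(object):
    def __init__(self):
        self.state = 'draft'
    def action_confirm(self):
        self.state = 'confirmed'
    def force_assign(self):
        self.state = 'assigned'
    def action_done(self):
        self.state = 'done'

picking = ToyPicking()
for step in (picking.action_confirm, picking.force_assign, picking.action_done):
    step()
assert picking.state == 'done'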
  
      def cancel_order(self, cr, uid, ids, context=None):
          """
          stock_picking_obj = self.pool.get('stock.picking')
          for order in self.browse(cr, uid, ids, context=context):
 -            stock_picking_obj.signal_button_cancel(cr, uid, [order.picking_id.id])
 +            stock_picking_obj.action_cancel(cr, uid, [order.picking_id.id])
              if stock_picking_obj.browse(cr, uid, order.picking_id.id, context=context).state <> 'cancel':
                  raise osv.except_osv(_('Error!'), _('Unable to cancel the picking.'))
          self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
              'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False,
          }
  
 -        account_def = property_obj.get(cr, uid, 'property_account_receivable', 'res.partner', context=context)
 -        args['account_id'] = (order.partner_id and order.partner_id.property_account_receivable \
 -                             and order.partner_id.property_account_receivable.id) or (account_def and account_def.id) or False
 -
 -        if not args['account_id']:
 -            if not args['partner_id']:
 -                msg = _('There is no receivable account defined to make payment.')
 -            else:
 -                msg = _('There is no receivable account defined to make payment for the partner: "%s" (id:%d).') % (order.partner_id.name, order.partner_id.id,)
 -            raise osv.except_osv(_('Configuration Error!'), msg)
 -
          context.pop('pos_session_id', False)
  
          journal_id = data.get('journal', False)
              'statement_id' : statement_id,
              'pos_statement_id' : order_id,
              'journal_id' : journal_id,
 -            'type' : 'customer',
              'ref' : order.session_id.name,
          })
  
                      'qty': -order_line.qty
                  }, context=context)
  
 -        new_order = ','.join(map(str,clone_list))
          abs = {
 -            #'domain': "[('id', 'in', ["+new_order+"])]",
              'name': _('Return Products'),
              'view_type': 'form',
              'view_mode': 'form',
                                                                 line.product_id.uom_id.id,
                                                                 line.qty, partner_id = order.partner_id.id,
                                                                 fposition_id=order.partner_id.property_account_position.id)['value'])
 -                if line.product_id.description_sale:
 -                    inv_line['note'] = line.product_id.description_sale
                  inv_line['price_unit'] = line.price_unit
                  inv_line['discount'] = line.discount
                  inv_line['name'] = inv_name
@@@ -1298,80 -1289,30 +1299,80 @@@ class ean_wizard(osv.osv_memory)
              self.pool[m].write(cr,uid,[m_id],{'ean13':ean13})
          return { 'type' : 'ir.actions.act_window_close' }
  
 -class product_product(osv.osv):
 -    _inherit = 'product.product'
 +class pos_category(osv.osv):
 +    _name = "pos.category"
 +    _description = "Public Category"
 +    _order = "sequence, name"
 +
 +    _constraints = [
 +        (osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
 +    ]
 +
 +    def name_get(self, cr, uid, ids, context=None):
 +        if not len(ids):
 +            return []
 +        reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
 +        res = []
 +        for record in reads:
 +            name = record['name']
 +            if record['parent_id']:
 +                name = record['parent_id'][1]+' / '+name
 +            res.append((record['id'], name))
 +        return res
  
 +    def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
 +        res = self.name_get(cr, uid, ids, context=context)
 +        return dict(res)
  
 -    #def _get_small_image(self, cr, uid, ids, prop, unknow_none, context=None):
 -    #    result = {}
 -    #    for obj in self.browse(cr, uid, ids, context=context):
 -    #        if not obj.product_image:
 -    #            result[obj.id] = False
 -    #            continue
 +    def _get_image(self, cr, uid, ids, name, args, context=None):
 +        result = dict.fromkeys(ids, False)
 +        for obj in self.browse(cr, uid, ids, context=context):
 +            result[obj.id] = tools.image_get_resized_images(obj.image)
 +        return result
 +    
 +    def _set_image(self, cr, uid, id, name, value, args, context=None):
 +        return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
 +
 +    _columns = {
 +        'name': fields.char('Name', required=True, translate=True),
 +        'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
 +        'parent_id': fields.many2one('pos.category','Parent Category', select=True),
 +        'child_id': fields.one2many('pos.category', 'parent_id', string='Children Categories'),
 +        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of product categories."),
 +        
 +        # NOTE: there is no 'default image', because by default we don't show thumbnails for categories. However if we have a thumbnail
 +        # for at least one category, then we display a default image on the other, so that the buttons have consistent styling.
 +        # In this case, the default image is set by the js code.
 +        # NOTE2: image: all image fields are base64 encoded and PIL-supported
 +        'image': fields.binary("Image",
 +            help="This field holds the image used as image for the category, limited to 1024x1024px."),
 +        'image_medium': fields.function(_get_image, fnct_inv=_set_image,
 +            string="Medium-sized image", type="binary", multi="_get_image",
 +            store={
 +                'pos.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
 +            },
 +            help="Medium-sized image of the category. It is automatically "\
 +                 "resized as a 128x128px image, with aspect ratio preserved. "\
 +                 "Use this field in form views or some kanban views."),
 +        'image_small': fields.function(_get_image, fnct_inv=_set_image,
 +            string="Small-sized image", type="binary", multi="_get_image",
 +            store={
 +                'pos.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
 +            },
 +            help="Small-sized image of the category. It is automatically "\
 +                 "resized as a 64x64px image, with aspect ratio preserved. "\
 +                 "Use this field anywhere a small image is required."),
 +    }
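
Only the full-size image is stored directly; image_medium and image_small are function fields recomputed whenever 'image' changes (the store triggers above). A minimal Pillow sketch of the aspect-preserving downscale that the openerp.tools helpers perform (direct Pillow usage here is illustrative, not the exact tools implementation):

import base64
import io
from PIL import Image  # assumes Pillow is available

def resize_b64(b64_data, size):
    # Downscale in place, preserving aspect ratio, and re-encode to base64.
    img = Image.open(io.BytesIO(base64.b64decode(b64_data)))
    img.thumbnail(size, Image.ANTIALIAS)
    out = io.BytesIO()
    img.save(out, 'PNG')
    return base64.b64encode(out.getvalue())

# medium = resize_b64(category_image, (128, 128))
# small = resize_b64(category_image, (64, 64))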
  
 -    #        image_stream = io.BytesIO(obj.product_image.decode('base64'))
 -    #        img = Image.open(image_stream)
 -    #        img.thumbnail((120, 100), Image.ANTIALIAS)
 -    #        img_stream = StringIO.StringIO()
 -    #        img.save(img_stream, "JPEG")
 -    #        result[obj.id] = img_stream.getvalue().encode('base64')
 -    #    return result
 +class product_template(osv.osv):
 +    _inherit = 'product.template'
  
      _columns = {
         'income_pdt': fields.boolean('Point of Sale Cash In', help="Check if this is a product you can use to put cash into a statement for the point of sale backend."),
         'expense_pdt': fields.boolean('Point of Sale Cash Out', help="Check if this is a product you can use to take cash from a statement for the point of sale backend, example: money lost, transfer to bank, etc."),
          'available_in_pos': fields.boolean('Available in the Point of Sale', help='Check if you want this product to appear in the Point of Sale'), 
 -        'to_weight' : fields.boolean('To Weight', help="Check if the product should be weighted (mainly used with self check-out interface)."),
 +        'to_weight' : fields.boolean('To Weigh', help="Check if the product should be weighed (mainly used with self check-out interface)."),
 +        'pos_categ_id': fields.many2one('pos.category','Point of Sale Category', help="Those categories are used to group similar products for point of sale."),
      }
  
      _defaults = {
          'available_in_pos': True,
      }
  
 -    def edit_ean(self, cr, uid, ids, context):
 -        return {
 -            'name': _("Assign a Custom EAN"),
 -            'type': 'ir.actions.act_window',
 -            'view_type': 'form',
 -            'view_mode': 'form',
 -            'res_model': 'pos.ean_wizard',
 -            'target' : 'new',
 -            'view_id': False,
 -            'context':context,
 -        }
 -
  # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
@@@ -3,7 -3,7 +3,7 @@@
  #
  #    OpenERP, Open Source Management Solution
  #    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
 -#    Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
 +#    Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
  #
  #    This program is free software: you can redistribute it and/or modify
  #    it under the terms of the GNU Affero General Public License as
@@@ -42,7 -42,6 +42,7 @@@ from openerp import SUPERUSER_I
  from openerp.tools.translate import _
  from openerp.modules.module import initialize_sys_path, \
      load_openerp_module, init_module_models, adapt_version
 +from module import runs_post_install
  
  _logger = logging.getLogger(__name__)
  _test_logger = logging.getLogger('openerp.tests')
@@@ -153,6 -152,7 +153,6 @@@ def load_module_graph(cr, graph, status
          loaded_modules.append(package.name)
          if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
              init_module_models(cr, package.name, models)
 -        registry._init_modules.add(package.name)
          status['progress'] = float(index) / len(graph)
  
          # Can't put this line out of the loop: ir.module.module will be
  
              migrations.migrate_module(package, 'post')
  
 +            registry._init_modules.add(package.name)
 +            # validate all the views as a whole
 +            registry['ir.ui.view']._validate_module_views(cr, SUPERUSER_ID, module_name)
 +
              if has_demo:
 -                # launch tests only in demo mode, as most tests will depend
 -                # on demo data. Other tests can be added into the regular
 -                # 'data' section, but should probably not alter the data,
 -                # as there is no rollback.
 +                # launch tests only in demo mode, allowing tests to use demo data.
                  if tools.config.options['test_enable']:
 +                    # YAML tests
                      report.record_result(load_test(module_name, idref, mode))
 -
 -                    # Run the `fast_suite` and `checks` tests given by the module.
 -                    if module_name == 'base':
 -                        # Also run the core tests after the database is created.
 -                        report.record_result(openerp.modules.module.run_unit_tests('openerp'))
 -                    report.record_result(openerp.modules.module.run_unit_tests(module_name))
 +                    # Python tests
 +                    ir_http = registry['ir.http']
 +                    if hasattr(ir_http, '_routing_map'):
 +                        # Force routing map to be rebuilt between each module test suite
 +                        del(ir_http._routing_map)
 +                    report.record_result(openerp.modules.module.run_unit_tests(module_name, cr.dbname))
  
              processed_modules.append(package.name)
  
                  if hasattr(package, kind):
                      delattr(package, kind)
  
 +        registry._init_modules.add(package.name)
          cr.commit()
  
      # The query won't be valid for models created later (i.e. custom model
@@@ -250,6 -247,7 +250,6 @@@ def load_marked_modules(cr, graph, stat
          if not processed: break
      return processed_modules
  
 -
  def load_modules(db, force_demo=False, status=None, update_module=False):
      # TODO status['progress'] reporting is broken: used twice (and reset each
      # time to zero) in load_module_graph, not fine-grained enough.
          if not openerp.modules.db.is_initialized(cr):
              _logger.info("init db")
              openerp.modules.db.initialize(cr)
+             update_module = True # process auto-installed modules
              tools.config["init"]["all"] = 1
              tools.config['update']['all'] = 1
              if not tools.config['without_demo']:
          for model in registry.models.values():
              model._register_hook(cr)
  
 +        # STEP 9: Run the post-install tests
 +        cr.commit()
 +        if openerp.tools.config['test_enable']:
 +            cr.execute("SELECT name FROM ir_module_module WHERE state='installed'")
 +            for module_name in cr.fetchall():
 +                report.record_result(openerp.modules.module.run_unit_tests(module_name[0], cr.dbname, position=runs_post_install))
      finally:
          cr.close()
  
 -
  # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
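
runs_post_install, imported at the top of loading.py, is passed as the position predicate so that STEP 9 only picks up test suites flagged to run after the whole database is installed. An illustrative stand-in for the predicate idea (the attribute name used here is an assumption, not the exact openerp.modules.module implementation):

# Illustrative predicate filtering; the flag name is an assumption.
def runs_post_install(test):
    return getattr(test, 'post_install', False)

class FastCheck(object):
    post_install = False

class FullDbCheck(object):
    post_install = True

suites = [FastCheck, FullDbCheck]
assert [s for s in suites if runs_post_install(s)] == [FullDbCheck]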
diff --combined openerp/osv/orm.py
@@@ -53,9 -53,10 +53,10 @@@ import simplejso
  import time
  import traceback
  import types
+ from collections import defaultdict
  
  import babel.dates
 -import dateutil.parser
 +import dateutil.relativedelta
  import psycopg2
  from lxml import etree
  
@@@ -75,7 -76,7 +76,7 @@@ _schema = logging.getLogger(__name__ + 
  # List of etree._Element subclasses that we choose to ignore when parsing XML.
  from openerp.tools import SKIPPED_ELEMENT_TYPES
  
 -regex_order = re.compile('^( *([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
 +regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
  regex_object_name = re.compile(r'^[a-z0-9_.]+$')
  
  AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
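
The widened regex_order admits ':' so that interval-annotated groupbys such as date_order:month survive the ORDER BY validation in _check_qorder. A quick runnable check:

import re

regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
assert regex_order.match('date_order:month desc, id')
assert not regex_order.match('date_order;month')  # still rejected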
@@@ -672,11 -673,11 +673,11 @@@ class BaseModel(object)
  
      OpenERP models are created by inheriting from this class' subclasses:
  
 -        * Model: for regular database-persisted models
 -        * TransientModel: for temporary data, stored in the database but automatically
 -                          vacuumed every so often
 -        * AbstractModel: for abstract super classes meant to be shared by multiple
 -                        _inheriting classes (usually Models or TransientModels)
 +    * Model: for regular database-persisted models
 +    * TransientModel: for temporary data, stored in the database but automatically
 +                      vacuumed every so often
 +    * AbstractModel: for abstract super classes meant to be shared by multiple
 +                     _inheriting classes (usually Models or TransientModels)
  
      The system will later instantiate the class once per database (on
      which the class' module is installed).
      _all_columns = {}
  
      _table = None
 -    _invalids = set()
      _log_create = False
      _sql_constraints = []
      _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
              self._rec_name = 'name'
  
  
 -    def __export_row(self, cr, uid, row, fields, context=None):
 +    def __export_row(self, cr, uid, row, fields, raw_data=False, context=None):
          if context is None:
              context = {}
  
                              break
  
                          for row2 in r:
 -                            lines2 = row2._model.__export_row(cr, uid, row2, fields2,
 -                                    context)
 +                            lines2 = row2._model.__export_row(cr, uid, row2, fields2, context=context)
                              if first:
                                  for fpos2 in range(len(fields)):
                                      if lines2 and lines2[0][fpos2]:
                                  lines += lines2
                          break
                      i += 1
 +
                  if i == len(f):
                      if isinstance(r, browse_record):
                          r = self.pool[r._table_name].name_get(cr, uid, [r.id], context=context)
                          r = r and r[0] and r[0][1] or ''
 -                    data[fpos] = tools.ustr(r or '')
 +                    if raw_data and cols and cols._type in ('integer', 'boolean', 'float'):
 +                        data[fpos] = r
 +                    elif raw_data and cols and cols._type == 'date':
 +                        data[fpos] = datetime.datetime.strptime(r, tools.DEFAULT_SERVER_DATE_FORMAT).date()
 +                    elif raw_data and cols and cols._type == 'datetime':
 +                        data[fpos] = datetime.datetime.strptime(r, tools.DEFAULT_SERVER_DATETIME_FORMAT)
 +                    else:
 +                        data[fpos] = tools.ustr(r or '')
          return [data] + lines
  
 -    def export_data(self, cr, uid, ids, fields_to_export, context=None):
 +    def export_data(self, cr, uid, ids, fields_to_export, raw_data=False, context=None):
          """
          Export fields for selected objects
  
          :param uid: current user id
          :param ids: list of ids
          :param fields_to_export: list of fields
 +        :param raw_data: True to return values in their native field types, False for string values
          :param context: context arguments, like lang, time zone
          :rtype: dictionary with a *datas* matrix
  
          fields_to_export = map(fix_import_export_id_paths, fields_to_export)
          datas = []
          for row in self.browse(cr, uid, ids, context):
 -            datas += self.__export_row(cr, uid, row, fields_to_export, context)
 +            datas += self.__export_row(cr, uid, row, fields_to_export, raw_data=raw_data, context=context)
          return {'datas': datas}
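
With raw_data=True the exporter keeps native Python types instead of coercing every cell through tools.ustr. A stand-alone sketch of that per-cell branch (Python 2, mirroring the basestring/ustr idioms of this file; export_cell is an illustrative helper, not the real API):

import datetime

DATE_FMT = '%Y-%m-%d'  # tools.DEFAULT_SERVER_DATE_FORMAT

def export_cell(value, col_type, raw_data):
    # Typed passthrough for raw exports, string coercion otherwise.
    if raw_data and col_type in ('integer', 'boolean', 'float'):
        return value
    if raw_data and col_type == 'date':
        return datetime.datetime.strptime(value, DATE_FMT).date()
    return unicode(value or '')  # Python 2 stand-in for tools.ustr

assert export_cell('2014-09-17', 'date', True) == datetime.date(2014, 9, 17)
assert export_cell(False, 'char', False) == u''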
  
      def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
  
              yield dbid, xid, converted, dict(extras, record=stream.index)
  
 -    def get_invalid_fields(self, cr, uid):
 -        return list(self._invalids)
 -
      def _validate(self, cr, uid, ids, context=None):
          context = context or {}
          lng = context.get('lang')
                  # Check presence of __call__ directly instead of using
                  # callable() because it will be deprecated as of Python 3.0
                  if hasattr(msg, '__call__'):
 -                    tmp_msg = msg(self, cr, uid, ids, context=context)
 -                    if isinstance(tmp_msg, tuple):
 -                        tmp_msg, params = tmp_msg
 -                        translated_msg = tmp_msg % params
 -                    else:
 -                        translated_msg = tmp_msg
 +                    translated_msg = msg(self, cr, uid, ids, context=context)
 +                    if isinstance(translated_msg, tuple):
 +                        translated_msg = translated_msg[0] % translated_msg[1]
                  else:
                      translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
                  if extra_error:
                  error_msgs.append(
                          _("The field(s) `%s` failed against a constraint: %s") % (', '.join(fields), translated_msg)
                  )
 -                self._invalids.update(fields)
          if error_msgs:
              raise except_orm('ValidateError', '\n'.join(error_msgs))
 -        else:
 -            self._invalids.clear()
  
      def default_get(self, cr, uid, fields_list, context=None):
          """
              }
          return result
  
 +    def get_formview_id(self, cr, uid, id, context=None):
 +        """ Return a view id to open the document with. This method is meant to be
 +            overridden in addons that want to give specific view ids for example.
 +
 +            :param int id: id of the document to open
 +        """
 +        return False
 +
 +    def get_formview_action(self, cr, uid, id, context=None):
 +        """ Return an action to open the document. This method is meant to be
 +            overridden in addons that want to give specific view ids for example.
 +
 +            :param int id: id of the document to open
 +        """
 +        view_id = self.get_formview_id(cr, uid, id, context=context)
 +        return {
 +                'type': 'ir.actions.act_window',
 +                'res_model': self._name,
 +                'view_type': 'form',
 +                'view_mode': 'form',
 +                'views': [(view_id, 'form')],
 +                'target': 'current',
 +                'res_id': id,
 +            }
 +
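An addon only needs to override get_formview_id; get_formview_action picks the override up automatically. A sketch of the override pattern with plain classes (model name and view id are made up; the act_window dict mirrors the one above):

class BaseDoc(object):
    _name = 'my.doc'  # illustrative model name

    def get_formview_id(self, id):
        return False

    def get_formview_action(self, id):
        return {'type': 'ir.actions.act_window', 'res_model': self._name,
                'view_type': 'form', 'view_mode': 'form',
                'views': [(self.get_formview_id(id), 'form')],
                'target': 'current', 'res_id': id}

class CustomDoc(BaseDoc):
    def get_formview_id(self, id):
        return 42  # a specific form view

assert CustomDoc().get_formview_action(5)['views'] == [(42, 'form')]
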
      def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
          return self.pool['ir.ui.view'].postprocess_and_fields(
              cr, uid, self._name, node, view_id, context=context)
              pass
  
  
 -    def _read_group_fill_results(self, cr, uid, domain, groupby, groupby_list, aggregated_fields,
 +    def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
 +                                 aggregated_fields, count_field,
                                   read_group_result, read_group_order=None, context=None):
          """Helper method for filling in empty groups for all possible values of
             the field being grouped by"""
  
          result_template = dict.fromkeys(aggregated_fields, False)
          result_template[groupby + '_count'] = 0
 -        if groupby_list and len(groupby_list) > 1:
 -            result_template['__context'] = {'group_by': groupby_list[1:]}
 +        if remaining_groupbys:
 +            result_template['__context'] = {'group_by': remaining_groupbys}
  
          # Merge the left_side (current results as dicts) with the right_side (all
          # possible values as m2o pairs). Both lists are supposed to be using the
          result = []
          known_values = {}
  
 -        if len(groupby_list) < 2 and context.get('group_by_no_leaf'):
 -            count_attr = '_'
 -        else:
 -            count_attr = groupby
 -        count_attr += '_count'
 -
          def append_left(left_side):
              grouped_value = left_side[groupby] and left_side[groupby][0]
              if not grouped_value in known_values:
                  result.append(left_side)
                  known_values[grouped_value] = left_side
              else:
 -                known_values[grouped_value].update({count_attr: left_side[count_attr]})
 +                known_values[grouped_value].update({count_field: left_side[count_field]})
          def append_right(right_side):
              grouped_value = right_side[0]
              if not grouped_value in known_values:
                  r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
          return result
  
 -    def _read_group_prepare(self, orderby, aggregated_fields, groupby, qualified_groupby_field, query, groupby_type=None):
 +    def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
          """
          Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
         to the query if the order should be computed against an m2o field.
          :param orderby: the orderby definition in the form "%(field)s %(order)s"
          :param aggregated_fields: list of aggregated fields in the query
 -        :param groupby: the current groupby field name
 -        :param qualified_groupby_field: the fully qualified SQL name for the grouped field
 +        :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby.
 +                Each dictionary contains the qualified name of its groupby
 +                (the fully qualified SQL name for the corresponding field)
 +                and the (non-raw) field name.
          :param osv.Query query: the query under construction
          :return: (groupby_terms, orderby_terms)
          """
          orderby_terms = []
 -        groupby_terms = [qualified_groupby_field] if groupby else []
 +        groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
 +        groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
          if not orderby:
 -            return groupby_terms, orderby_terms    
 +            return groupby_terms, orderby_terms
  
          self._check_qorder(orderby)
          for order_part in orderby.split(','):
              order_split = order_part.split()
              order_field = order_split[0]
 -            if order_field == groupby:
 -                if groupby_type == 'many2one':
 +            if order_field in groupby_fields:
 +
 +                if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
                      order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
                      if order_clause:
                          orderby_terms.append(order_clause)
                          groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
                  else:
 -                    orderby_terms.append(order_part)
 +                    order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
 +                    orderby_terms.append(order)
              elif order_field in aggregated_fields:
                  orderby_terms.append(order_part)
              else:
                               self._name, order_part)
          return groupby_terms, orderby_terms
  
 -    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
 +    def _read_group_process_groupby(self, gb, query, context):
 +        """
 +            Helper method to collect important information about groupbys: raw
 +            field name, type, time information, qualified name, ...
 +        """
 +        split = gb.split(':')
 +        field_type = self._all_columns[split[0]].column._type
 +        gb_function = split[1] if len(split) == 2 else None
 +        temporal = field_type in ('date', 'datetime')
 +        tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
 +        qualified_field = self._inherits_join_calc(split[0], query)
 +        if temporal:
 +            display_formats = {
 +                'day': 'dd MMM YYYY', 
 +                'week': "'W'w YYYY", 
 +                'month': 'MMMM YYYY', 
 +                'quarter': 'QQQ YYYY', 
 +                'year': 'YYYY'
 +            }
 +            time_intervals = {
 +                'day': dateutil.relativedelta.relativedelta(days=1),
 +                'week': datetime.timedelta(days=7),
 +                'month': dateutil.relativedelta.relativedelta(months=1),
 +                'quarter': dateutil.relativedelta.relativedelta(months=3),
 +                'year': dateutil.relativedelta.relativedelta(years=1)
 +            }
 +            if tz_convert:
 +                qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
 +            qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
 +        if field_type == 'boolean':
 +            qualified_field = "coalesce(%s,false)" % qualified_field
 +        return {
 +            'field': split[0],
 +            'groupby': gb,
 +            'type': field_type, 
 +            'display_format': display_formats[gb_function or 'month'] if temporal else None,
 +            'interval': time_intervals[gb_function or 'month'] if temporal else None,                
 +            'tz_convert': tz_convert,
 +            'qualified_field': qualified_field
 +        }
 +
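For a spec like 'date_order:week' on a datetime column with no active timezone in the context, the annotation comes out roughly as below (the qualified_field value assumes a hypothetical pos_order table):

import datetime

annotation = {
    'field': 'date_order',
    'groupby': 'date_order:week',
    'type': 'datetime',
    'display_format': "'W'w YYYY",
    'interval': datetime.timedelta(days=7),
    'tz_convert': False,
    'qualified_field': 'date_trunc(\'week\', "pos_order"."date_order")',
}
assert annotation['interval'].days == 7
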
 +    def _read_group_prepare_data(self, key, value, groupby_dict, context):
 +        """
 +            Helper method to sanitize the data received by read_group. None
 +            values are converted to False, and date/datetime values are parsed
 +            and corrected according to the user timezone.
 +        """
 +        value = False if value is None else value
 +        gb = groupby_dict.get(key)
 +        if gb and gb['type'] in ('date', 'datetime') and value:
 +            if isinstance(value, basestring):
 +                dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
 +                value = datetime.datetime.strptime(value, dt_format)
 +            if gb['tz_convert']:
 +                value =  pytz.timezone(context['tz']).localize(value)
 +        return value
 +
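When tz_convert is set, the naive datetime coming back from SQL (already shifted to the user's wall clock by the timezone() calls built above) gets its tzinfo attached via pytz.localize:

import datetime
import pytz

value = datetime.datetime(2014, 9, 17, 13, 15)  # naive, user wall-clock time
localized = pytz.timezone('Europe/Brussels').localize(value)
assert localized.utcoffset() == datetime.timedelta(hours=2)  # CEST
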
 +    def _read_group_get_domain(self, groupby, value):
 +        """
 +            Helper method to construct the domain corresponding to a groupby and 
 +            a given value. This is mostly relevant for date/datetime.
 +        """
 +        if groupby['type'] in ('date', 'datetime') and value:
 +            dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
 +            domain_dt_begin = value
 +            domain_dt_end = value + groupby['interval']
 +            if groupby['tz_convert']:
 +                domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
 +                domain_dt_end = domain_dt_end.astimezone(pytz.utc)
 +            return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
 +                   (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
 +        if groupby['type'] == 'many2one' and value:
 +                value = value[0]
 +        return [(groupby['field'], '=', value)]
 +
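Each temporal group translates into a half-open range on the underlying field; for example, the month bucket starting 2014-09-01 yields:

import datetime
from dateutil.relativedelta import relativedelta

start = datetime.datetime(2014, 9, 1)
end = start + relativedelta(months=1)
domain = [('date_order', '>=', start.strftime('%Y-%m-%d %H:%M:%S')),
          ('date_order', '<', end.strftime('%Y-%m-%d %H:%M:%S'))]
assert domain[1][2] == '2014-10-01 00:00:00'  # exclusive upper bound
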
 +    def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
 +        """
 +            Helper method to format the data contained in the dictionary data by
 +            adding the domain corresponding to its values, the remaining groupbys
 +            in the context, and by properly formatting the date/datetime values.
 +        """
 +        domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
 +        for k,v in data.iteritems():
 +            gb = groupby_dict.get(k)
 +            if gb and gb['type'] in ('date', 'datetime') and v:
 +                data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
 +
 +        data['__domain'] = domain_group + domain 
 +        if len(groupby) - len(annotated_groupbys) >= 1:
 +            data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
 +        del data['id']
 +        return data
 +
 +    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
          """
          Get the list of records in list view grouped by the given ``groupby`` fields
  
                               overriding the natural sort ordering of the
                               groups, see also :py:meth:`~osv.osv.osv.search`
                               (supported only for many2one fields currently)
 +        :param bool lazy: if true, the results are only grouped by the first groupby and the 
 +                remaining groupbys are put in the __context key.  If false, all the groupbys are
 +                done in one call.
          :return: list of dictionaries(one dictionary for each record) containing:
  
                      * the values of fields grouped by the fields in ``groupby`` argument
         :rtype: [{'field_name_1': value, ...}, ...]
          :raise AccessError: * if user has no read rights on the requested object
                              * if user tries to bypass access rules for read on the requested object
 -
          """
 -        context = context or {}
 +        if context is None:
 +            context = {}
          self.check_access_rights(cr, uid, 'read')
 -        if not fields:
 -            fields = self._columns.keys()
 +        query = self._where_calc(cr, uid, domain, context=context) 
 +        fields = fields or self._columns.keys()
  
 -        query = self._where_calc(cr, uid, domain, context=context)
 -        self._apply_ir_rules(cr, uid, query, 'read', context=context)
 -
 -        # Take care of adding join(s) if groupby is an '_inherits'ed field
 -        groupby_list = groupby
 -        qualified_groupby_field = groupby
 -        if groupby:
 -            if isinstance(groupby, list):
 -                groupby = groupby[0]
 -            splitted_groupby = groupby.split(':')
 -            if len(splitted_groupby) == 2:
 -                groupby = splitted_groupby[0]
 -                groupby_function = splitted_groupby[1]
 -            else:
 -                groupby_function = False
 -            qualified_groupby_field = self._inherits_join_calc(groupby, query)
 +        groupby = [groupby] if isinstance(groupby, basestring) else groupby
 +        groupby_list = groupby[:1] if lazy else groupby
 +        annotated_groupbys = [self._read_group_process_groupby(gb, query, context) 
 +                                    for gb in groupby_list]
 +        groupby_fields = [g['field'] for g in annotated_groupbys]
 +        order = orderby or ','.join([g for g in groupby_list])
 +        groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
  
 -        if groupby:
 -            assert not groupby or groupby in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
 -            groupby_def = self._columns.get(groupby) or (self._inherit_fields.get(groupby) and self._inherit_fields.get(groupby)[2])
 +        self._apply_ir_rules(cr, uid, query, 'read', context=context)
 +        for gb in groupby_fields:
 +            assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
 +            groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
              assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
 -
 -        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
 -        fget = self.fields_get(cr, uid, fields)
 -        group_by_params = {}
 -        select_terms = []
 -        groupby_type = None
 -        if groupby:
 -            if fget.get(groupby):
 -                groupby_type = fget[groupby]['type']
 -                if groupby_type in ('date', 'datetime'):
 -                    if groupby_function:
 -                        interval = groupby_function
 -                    else:
 -                        interval = 'month'
 -
 -                    if interval == 'day':
 -                        display_format = 'dd MMM YYYY' 
 -                    elif interval == 'week':
 -                        display_format = "'W'w YYYY"
 -                    elif interval == 'month':
 -                        display_format = 'MMMM YYYY'
 -                    elif interval == 'quarter':
 -                        display_format = 'QQQ YYYY'
 -                    elif interval == 'year':
 -                        display_format = 'YYYY'
 -
 -                    if groupby_type == 'datetime' and context.get('tz') in pytz.all_timezones:
 -                        # Convert groupby result to user TZ to avoid confusion!
 -                        # PostgreSQL is compatible with all pytz timezone names, so we can use them
 -                        # directly for conversion, starting with timestamps stored in UTC. 
 -                        timezone = context.get('tz', 'UTC')
 -                        qualified_groupby_field = "timezone('%s', timezone('UTC',%s))" % (timezone, qualified_groupby_field)
 -                    qualified_groupby_field = "date_trunc('%s', %s)" % (interval, qualified_groupby_field)
 -                elif groupby_type == 'boolean':
 -                    qualified_groupby_field = "coalesce(%s,false)" % qualified_groupby_field
 -                select_terms.append("%s as %s " % (qualified_groupby_field, groupby))
 -            else:
 +            if not (gb in self._all_columns):
                  # Don't allow arbitrary values, as this would be a SQL injection vector!
                  raise except_orm(_('Invalid group_by'),
 -                                 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(groupby,))
 +                                 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
  
          aggregated_fields = [
              f for f in fields
 -            if f not in ('id', 'sequence', groupby)
 -            if fget[f]['type'] in ('integer', 'float')
 -            if (f in self._all_columns and getattr(self._all_columns[f].column, '_classic_write'))]
 -        for f in aggregated_fields:
 -            group_operator = fget[f].get('group_operator', 'sum')
 -            qualified_field = self._inherits_join_calc(f, query)
 -            select_terms.append("%s(%s) AS %s" % (group_operator, qualified_field, f))
 +            if f not in ('id', 'sequence')
 +            if f not in groupby_fields
 +            if self._all_columns[f].column._type in ('integer', 'float')
 +            if getattr(self._all_columns[f].column, '_classic_write')]
 +
 +        field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
 +        select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
  
 -        order = orderby or groupby or ''
 -        groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, groupby, qualified_groupby_field, query, groupby_type)
 +        for gb in annotated_groupbys:
 +            select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
  
 +        groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
          from_clause, where_clause, where_clause_params = query.get_sql()
 -        if len(groupby_list) < 2 and context.get('group_by_no_leaf'):
 -            count_field = '_'
 +        if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
 +            count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
          else:
 -            count_field = groupby
 +            count_field = '_'
 +        count_field += '_count'
  
          prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
          prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
  
          query = """
 -            SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count
 -                   %(extra_fields)s
 +            SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
              FROM %(from)s
              %(where)s
              %(groupby)s
              'offset': prefix_term('OFFSET', int(offset) if limit else None),
          }
          cr.execute(query, where_clause_params)
 -        alldata = {}
          fetched_data = cr.dictfetchall()
  
 -        data_ids = []
 -        for r in fetched_data:
 -            for fld, val in r.items():
 -                if val is None: r[fld] = False
 -            alldata[r['id']] = r
 -            data_ids.append(r['id'])
 -            del r['id']
 -
 -        if groupby:
 -            data = self.read(cr, uid, data_ids, [groupby], context=context)
 -            # restore order of the search as read() uses the default _order (this is only for groups, so the footprint of data should be small):
 -            data_dict = dict((d['id'], d[groupby] ) for d in data)
 -            result = [{'id': i, groupby: data_dict[i]} for i in data_ids]
 -        else:
 -            result = [{'id': i} for i in data_ids]
 -
 -        for d in result:
 -            if groupby:
 -                d['__domain'] = [(groupby, '=', alldata[d['id']][groupby] or False)] + domain
 -                if not isinstance(groupby_list, (str, unicode)):
 -                    if groupby or not context.get('group_by_no_leaf', False):
 -                        d['__context'] = {'group_by': groupby_list[1:]}
 -            if groupby and groupby in fget:
 -                groupby_type = fget[groupby]['type']
 -                if d[groupby] and groupby_type in ('date', 'datetime'):
 -                    groupby_datetime = alldata[d['id']][groupby]
 -                    if isinstance(groupby_datetime, basestring):
 -                        _default = datetime.datetime(1970, 1, 1)    # force starts of month
 -                        groupby_datetime = dateutil.parser.parse(groupby_datetime, default=_default)
 -                    tz_convert = groupby_type == 'datetime' and context.get('tz') in pytz.all_timezones
 -                    if tz_convert:
 -                        groupby_datetime =  pytz.timezone(context['tz']).localize(groupby_datetime)
 -                    d[groupby] = babel.dates.format_date(
 -                        groupby_datetime, format=display_format, locale=context.get('lang', 'en_US'))
 -                    domain_dt_begin = groupby_datetime
 -                    if interval == 'quarter':
 -                        domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(months=3)
 -                    elif interval == 'month':
 -                        domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(months=1)
 -                    elif interval == 'week':
 -                        domain_dt_end = groupby_datetime + datetime.timedelta(days=7)
 -                    elif interval == 'day':
 -                        domain_dt_end = groupby_datetime + datetime.timedelta(days=1)
 -                    else:
 -                        domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(years=1)
 -                    if tz_convert:
 -                        # the time boundaries were all computed in the apparent TZ of the user,
 -                        # so we need to convert them to UTC to have proper server-side values.
 -                        domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
 -                        domain_dt_end = domain_dt_end.astimezone(pytz.utc)
 -                    dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby_type == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
 -                    d['__domain'] = [(groupby, '>=', domain_dt_begin.strftime(dt_format)),
 -                                     (groupby, '<', domain_dt_end.strftime(dt_format))] + domain
 -                del alldata[d['id']][groupby]
 -            d.update(alldata[d['id']])
 -            del d['id']
 -
 -        if groupby and groupby in self._group_by_full:
 -            result = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
 -                                                   aggregated_fields, result, read_group_order=order,
 -                                                   context=context)
 -
 +        if not groupby_fields:
 +            return fetched_data
 +
 +        many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
 +        if many2onefields:
 +            data_ids = [r['id'] for r in fetched_data]
 +            many2onefields = list(set(many2onefields))
 +            data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)} 
 +            for d in fetched_data:
 +                d.update(data_dict[d['id']])
 +
 +        data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
 +        result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
 +        if lazy and groupby_fields[0] in self._group_by_full:
 +            # Right now, read_group only fills results in lazy mode (the default).
 +            # If you need to have the empty groups in 'eager' mode, then the
 +            # method _read_group_fill_results needs to be completely reimplemented
 +            # in a sane way.
 +            result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
 +                                                       aggregated_fields, count_field, result, read_group_order=order,
 +                                                       context=context)
          return result
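
A result-shape sketch for groupby=['partner_id', 'date_order:month'] under both modes; ids, names and amounts are made up:

lazy_row = {
    'partner_id': (7, 'Agrolait'),
    'partner_id_count': 3,                       # count_field in lazy mode
    'amount_total': 120.0,
    '__domain': [('partner_id', '=', 7)],
    '__context': {'group_by': ['date_order:month']},  # remaining groupbys
}
eager_row = {
    'partner_id': (7, 'Agrolait'),
    'date_order:month': 'September 2014',
    '__count': 2,                                # count_field collapses to '_'
    'amount_total': 80.0,
    '__domain': [('partner_id', '=', 7),
                 ('date_order', '>=', '2014-09-01 00:00:00'),
                 ('date_order', '<', '2014-10-01 00:00:00')],
}
assert '__context' not in eager_row  # all groupbys already applied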
  
      def _inherits_join_add(self, current_model, parent_model_name, query):
              if len(constraints) == 1:
                  # Is it the right constraint?
                  cons, = constraints
+                 if self.is_transient() and not dest_model.is_transient():
+                     # transient foreign keys are added as cascade by default
+                     ondelete = ondelete or 'cascade'
                  if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
                      or cons['foreign_table'] != dest_model._table:
                      # Wrong FK: drop it and recreate
                          f_pg_notnull = res['attnotnull']
                          if isinstance(f, fields.function) and not f.store and\
                                  not getattr(f, 'nodrop', False):
 -                            _logger.info('column %s (%s) in table %s removed: converted to a function !\n',
 +                            _logger.info('column %s (%s) converted to a function, removed from table %s',
                                           k, f.string, self._table)
                              cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
                              cr.commit()
          """
          readonly = None
          self.check_field_access_rights(cr, user, 'write', vals.keys())
+         deleted_related = defaultdict(list)
          for field in vals.copy():
              fobj = None
              if field in self._columns:
                  fobj = self._inherit_fields[field][2]
              if not fobj:
                  continue
+             if fobj._type in ['one2many', 'many2many'] and vals[field]:
+                 for wtuple in vals[field]:
+                     if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
+                         deleted_related[fobj._obj].append(wtuple[1])
              groups = fobj.write
  
              if groups:
              for id in ids_to_update:
                  if id not in done[key]:
                      done[key][id] = True
-                     todo.append(id)
+                     if id not in deleted_related[object]:
+                         todo.append(id)
              self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
  
          self.step_workflow(cr, user, ids, context=context)
              self._transient_vacuum(cr, user)
  
          self.check_access_rights(cr, user, 'create')
 -        
 +
          vals = self._add_missing_default_values(cr, user, vals, context)
  
          if self._log_access:
@@@ -5251,7 -5228,7 +5261,7 @@@ def convert_pgerror_23502(model, fields
      m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                   r'not-null constraint\n',
                   str(e))
 -    field_name = m.group('field')
 +    field_name = m and m.group('field')
      if not m or field_name not in fields:
          return {'message': unicode(e)}
      message = _(u"Missing required value for the field '%s'.") % field_name
  def convert_pgerror_23505(model, fields, info, e):
      m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                   str(e))
 -    field_name = m.group('field')
 +    field_name = m and m.group('field')
      if not m or field_name not in fields:
          return {'message': unicode(e)}
      message = _(u"The value for the field '%s' already exists.") % field_name