pass
- def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,
+ def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
+ aggregated_fields, count_field,
read_group_result, read_group_order=None, context=None):
"""Helper method for filling in empty groups for all possible values of
the field being grouped by"""
result.append(left_side)
known_values[grouped_value] = left_side
else:
- count_attr = groupby + '_count'
- known_values[grouped_value].update({count_attr: left_side[count_attr]})
+ known_values[grouped_value].update({count_field: left_side[count_field]})
def append_right(right_side):
grouped_value = right_side[0]
if not grouped_value in known_values:
count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
else:
count_field = '_'
+ count_field += '_count'
prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
query = """
- SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count %(extra_fields)s
+ SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
FROM %(from)s
%(where)s
%(groupby)s
# method _read_group_fill_results need to be completely reimplemented
# in a sane way
result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
- aggregated_fields, result, read_group_order=order,
+ aggregated_fields, count_field, result, read_group_order=order,
context=context)
return result
elif self.env.field_todo(field):
# field must be recomputed, do not prefetch records to recompute
records -= self.env.field_todo(field)
+ elif not self._context.get('prefetch_fields', True):
+ # do not prefetch other fields
+ pass
elif self._columns[field.name]._prefetch:
# here we can optimize: prefetch all classic and many2one fields
fnames = set(fname
self.check_access_rule(cr, uid, ids, 'unlink', context=context)
pool_model_data = self.pool.get('ir.model.data')
ir_values_obj = self.pool.get('ir.values')
+ ir_attachment_obj = self.pool.get('ir.attachment')
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('delete from ' + self._table + ' ' \
'where id IN %s', (sub_ids,))
if ir_value_ids:
ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
+ # For the same reason, remove the ir_attachment records that reference the deleted records.
+ # The search is performed with raw SQL because the search method of ir_attachment is overridden to hide attachments of deleted records.
+ cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
+ ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
+ if ir_attachment_ids:
+ ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
+
# invalidate the *whole* cache, since the orm does not handle all
# changes made in the database, like cascading delete!
recs.invalidate_cache()
# determine which fields have been modified
for name, oldval in values.iteritems():
+ field = self._fields[name]
newval = record[name]
- if newval != oldval or getattr(newval, '_dirty', False):
- field = self._fields[name]
- result['value'][name] = field.convert_to_write(
- newval, record._origin, subfields.get(name),
- )
- todo.add(name)
+ if field.type in ('one2many', 'many2many'):
+ if newval != oldval or newval._dirty:
+ # put new value in result
+ result['value'][name] = field.convert_to_write(
+ newval, record._origin, subfields.get(name),
+ )
+ todo.add(name)
+ else:
+ # keep result: newval may have been dirty before
+ pass
+ else:
+ if newval != oldval:
+ # put new value in result
+ result['value'][name] = field.convert_to_write(
+ newval, record._origin, subfields.get(name),
+ )
+ todo.add(name)
+ else:
+ # clean up result to not return another value
+ result['value'].pop(name, None)
# At the moment, the client does not support updates on a *2many field
# while this one is modified by the user.