"""
+import babel.dates
import calendar
import collections
import copy
import traceback
import types
-import babel.dates
-import dateutil.parser
import psycopg2
from lxml import etree
regex_order = re.compile('^(([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
-AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
-
def transfer_field_to_modifiers(field, modifiers):
default_values = {}
state_exceptions = {}
self.__logger.debug(''.join(traceback.format_stack()))
raise KeyError(error_msg)
- prefetchable = lambda f: f._classic_write and f._prefetch and not f.groups and not f.deprecated
-
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
- if prefetchable(col):
+ if col._prefetch:
# gen the list of "local" (ie not inherited) fields which are classic or many2one
- field_filter = lambda x: prefetchable(x[1])
- fields_to_fetch = filter(field_filter, self._table._columns.items())
+ fields_to_fetch = filter(lambda x: x[1]._classic_write, self._table._columns.items())
# gen the list of inherited fields
inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
# complete the field list with the inherited fields which are classic or many2one
- fields_to_fetch += filter(field_filter, inherits)
+ fields_to_fetch += filter(lambda x: x[1]._classic_write, inherits)
# otherwise we fetch only that field
else:
fields_to_fetch = [(name, col)]
-
ids = filter(lambda id: name not in self._data[id], self._data.keys())
# read the results
field_names = map(lambda x: x[0], fields_to_fetch)
- try:
- field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
- except (openerp.exceptions.AccessError, except_orm):
- if len(ids) == 1:
- raise
- # prefetching attempt failed, perhaps we're violating ACL restrictions involuntarily
- _logger.info('Prefetching attempt for fields %s on %s failed for ids %s, re-trying just for id %s', field_names, self._model._name, ids, self._id)
- ids = [self._id]
- field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
+ field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
# TODO: improve this, very slow for reports
if self._fields_process:
continue
sm = f.store
if sm is True:
- sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, f.priority, None)}
+ sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, 10, None)}
for object, aa in sm.items():
if len(aa) == 4:
(fnct, fields2, order, length) = aa
raise except_orm('Error',
('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
self.pool._store_function.setdefault(object, [])
- t = (self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length)
- if not t in self.pool._store_function[object]:
- self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
- self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
+ self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
+ self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
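# A hedged, illustrative 'store' specification in the format enforced above,
# store={object: (fnct, fields, priority, time length)}; the model and field
# names below are assumptions for illustration, not taken from this code base:
#   store={
#       'account.invoice': (
#           lambda self, cr, uid, ids, c=None: ids,  # map written ids to ids to recompute
#           ['quantity', 'price_unit'],              # trigger fields (fields2)
#           10,                                      # priority, used by the sort above
#           None),                                   # time length
#   }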
for (key, _, msg) in self._sql_constraints:
self.pool._sql_error[self._table+'_'+key] = msg
'ondelete': field['on_delete'],
'translate': (field['translate']),
'manual': True,
- '_prefetch': False,
#'select': int(field['select_level'])
}
children = False
views = {}
for f in node:
- if f.tag in ('form', 'tree', 'graph', 'kanban', 'calendar'):
+ if f.tag in ('form', 'tree', 'graph', 'kanban'):
node.remove(f)
ctx = context.copy()
ctx['base_model_name'] = self._name
in_tree_view = node.tag == 'tree'
elif node.tag == 'calendar':
- for additional_field in ('date_start', 'date_delay', 'date_stop', 'color', 'all_day','attendee'):
+ for additional_field in ('date_start', 'date_delay', 'date_stop', 'color'):
if node.get(additional_field):
fields[node.get(additional_field)] = {}
return False
view = etree.Element('calendar', string=self._description)
- etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
+ etree.SubElement(view, 'field', self._rec_name_fallback(cr, user, context))
if self._date_name not in self._columns:
date_found = False
attribute = (child.get('name'), child.text and child.text.encode('utf8') or None)
if attribute[1]:
node.set(attribute[0], attribute[1])
- elif attribute[0] in node.attrib:
- del node.attrib[attribute[0]]
+ else:
+ del(node.attrib[attribute[0]])
else:
sib = node.getnext()
for child in spec:
are applied
"""
- sql_inherit = self.pool.get('ir.ui.view').get_inheriting_views_arch(cr, user, inherit_id, self._name, context=context)
+ sql_inherit = self.pool.get('ir.ui.view').get_inheriting_views_arch(cr, user, inherit_id, self._name)
for (view_arch, view_id) in sql_inherit:
source = apply_inheritance_specs(source, view_arch, view_id)
source = apply_view_inheritance(cr, user, source, view_id)
sql_res = False
parent_view_model = None
- view_ref_key = view_type + '_view_ref'
- view_ref = context.get(view_ref_key)
+ view_ref = context.get(view_type + '_view_ref')
# Search for a root (i.e. without any parent) view.
while True:
if view_ref and not view_id:
view_ref_res = cr.fetchone()
if view_ref_res:
view_id = view_ref_res[0]
- else:
- _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
- 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
- self._name)
if view_id:
cr.execute("""SELECT arch,name,field_parent,id,type,inherit_id,model
:param uid: current user id
:param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
:param list fields: list of fields present in the list view specified on the object
- :param list groupby: list of groupby descriptions by which the records will be grouped.
- A groupby description is either a field (then it will be grouped by that field)
- or a string 'field:groupby_function'. Right now, the only functions supported
- are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
- date/datetime fields.
+ :param list groupby: fields by which the records will be grouped
:param int offset: optional number of records to skip
:param int limit: optional max number of records to return
- :param dict context: context arguments, like lang, time zone.
+ :param dict context: context arguments, like lang, time zone
:param list orderby: optional ``order by`` specification, for
overriding the natural sort ordering of the
groups, see also :py:meth:`~osv.osv.osv.search`
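# Hedged usage sketch of this signature; 'sale.report', 'price_total' and
# 'date' are assumed names for illustration only:
#   groups = self.pool.get('sale.report').read_group(cr, uid,
#       domain=[('state', '=', 'done')], fields=['price_total'],
#       groupby=['date'], context={'lang': 'en_US'})
# Each returned group dict carries the aggregated fields plus '__domain' and
# '__context' entries used to drill down into the group.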
if groupby:
if isinstance(groupby, list):
groupby = groupby[0]
- splitted_groupby = groupby.split(':')
- if len(splitted_groupby) == 2:
- groupby = splitted_groupby[0]
- groupby_function = splitted_groupby[1]
- else:
- groupby_function = False
qualified_groupby_field = self._inherits_join_calc(groupby, query)
if groupby:
fget = self.fields_get(cr, uid, fields)
flist = ''
group_count = group_by = groupby
- group_by_params = {}
if groupby:
if fget.get(groupby):
groupby_type = fget[groupby]['type']
if groupby_type in ('date', 'datetime'):
- if groupby_function:
- interval = groupby_function
- else:
- interval = 'month'
-
- if interval == 'day':
- display_format = 'dd MMMM YYYY'
- elif interval == 'week':
- display_format = "'W'w"
- elif interval == 'month':
- display_format = 'MMMM'
- elif interval == 'quarter':
- display_format = 'QQQ'
- elif interval == 'year':
- display_format = 'YYYY'
-
- group_by_params = {
- 'display_format': display_format,
- 'interval': interval,
- }
- qualified_groupby_field = "date_trunc('%s',%s)" % (interval, qualified_groupby_field)
+ qualified_groupby_field = "to_char(%s,'yyyy-mm')" % qualified_groupby_field
flist = "%s as %s " % (qualified_groupby_field, groupby)
elif groupby_type == 'boolean':
qualified_groupby_field = "coalesce(%s,false)" % qualified_groupby_field
d['__context'] = {'group_by': groupby_list[1:]}
if groupby and groupby in fget:
if d[groupby] and fget[groupby]['type'] in ('date', 'datetime'):
- groupby_datetime = alldata[d['id']][groupby]
- if isinstance(groupby_datetime, basestring):
- _default = datetime.datetime(1970, 1, 1) # force starts of month
- groupby_datetime = dateutil.parser.parse(groupby_datetime, default=_default)
+ dt = datetime.datetime.strptime(alldata[d['id']][groupby][:7], '%Y-%m')
+ days = calendar.monthrange(dt.year, dt.month)[1]
+
+ date_value = datetime.datetime.strptime(d[groupby][:10], '%Y-%m-%d')
d[groupby] = babel.dates.format_date(
- groupby_datetime, format=group_by_params.get('display_format', 'MMMM yyyy'), locale=context.get('lang', 'en_US'))
- domain_dt_begin = groupby_datetime
- if group_by_params.get('interval') == 'quarter':
- domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(months=3)
- elif group_by_params.get('interval') == 'month':
- domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(months=1)
- elif group_by_params.get('interval') == 'week':
- domain_dt_end = groupby_datetime + datetime.timedelta(days=7)
- elif group_by_params.get('interval') == 'day':
- domain_dt_end = groupby_datetime + datetime.timedelta(days=1)
- else:
- domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(years=1)
- d['__domain'] = [(groupby, '>=', domain_dt_begin.strftime('%Y-%m-%d')), (groupby, '<', domain_dt_end.strftime('%Y-%m-%d'))] + domain
+ date_value, format='MMMM yyyy', locale=context.get('lang', 'en_US'))
+ d['__domain'] = [(groupby, '>=', alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-01', '%Y-%m-%d').strftime('%Y-%m-%d') or False),\
+ (groupby, '<=', alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-' + str(days), '%Y-%m-%d').strftime('%Y-%m-%d') or False)] + domain
del alldata[d['id']][groupby]
d.update(alldata[d['id']])
del d['id']
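# Pure-Python sketch of the month window computed above, with an assumed
# sample value: calendar.monthrange(year, month) returns (first_weekday,
# days_in_month), so for '2013-07':
#   days = calendar.monthrange(2013, 7)[1]   # -> 31
#   __domain gains ('date', '>=', '2013-07-01') and ('date', '<=', '2013-07-31')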
cr.execute('select id from '+self._table)
ids_lst = map(lambda x: x[0], cr.fetchall())
while ids_lst:
- iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
- ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
+ iids = ids_lst[:40]
+ ids_lst = ids_lst[40:]
res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
for key, val in res.items():
if f._multi:
"""
Record the creation of a constraint for this model, to make it possible
to delete it later when the module is uninstalled. Type can be either
- 'f' or 'u' depending on the constraint being a foreign key or not.
+ 'f' or 'u' depending on the constraing being a foreign key or not.
"""
- if not self._module:
- # no need to save constraints for custom models as they're not part
- # of any module
- return
assert type in ('f', 'u')
cr.execute("""
SELECT 1 FROM ir_model_constraint, ir_module_module
if fields_to_read is None:
fields_to_read = self._columns.keys()
+ # Construct a clause for the security rules.
+ # 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
+ # or will at least contain self._table.
+ rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
+
# all inherited fields + all non inherited fields for which the attribute whose name is in load is True
fields_pre = [f for f in fields_to_read if
f == self.CONCURRENCY_CHECK_FIELD
return 'length(%s) as "%s"' % (f_qual, f)
return f_qual
- # Construct a clause for the security rules.
- # 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
- # or will at least contain self._table.
- rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
-
fields_pre2 = map(convert_field, fields_pre)
order_by = self._parent_order or self._order
select_fields = ','.join(fields_pre2 + ['%s.id' % self._table])
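# Illustrative shape of the query assembled from these pieces (names assumed;
# the length(...) form comes from convert_field, e.g. a binary field read as
# its size):
#   SELECT length("res_partner"."image") as "image", "res_partner"."name", res_partner.id
#   FROM res_partner WHERE res_partner.id IN %s ORDER BY ...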
self._check_record_rules_result_count(cr, user, sub_ids, result_ids, 'read', context=context)
res.extend(results)
else:
- self.check_access_rule(cr, user, ids, 'read', context=context)
res = map(lambda x: {'id': x}, ids)
if context.get('lang'):
if not src_trans:
src_trans = vals[f]
# Inserting value to DB
- context_wo_lang = dict(context, lang=None)
- self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
+ self.write(cr, user, ids, {f: vals[f]})
self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
tocreate[v] = {}
else:
tocreate[v] = {'id': vals[self._inherits[v]]}
-
- columns = [
- # columns will contain a list of field defined as a tuple
- # tuple(field_name, format_string, field_value)
- # the tuple will be used by the string formatting for the INSERT
- # statement.
- ('id', "nextval('%s')" % self._sequence),
- ]
-
+ (upd0, upd1, upd2) = ('', '', [])
upd_todo = []
unknown_fields = []
for v in vals.keys():
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
- if not self._sequence:
- raise except_orm(
- _('UserError'),
- _('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.')
- )
+ # Try-except added to filter the creation of those records whose filds are readonly.
+ # Example : any dashboard which has all the fields readonly.(due to Views(database views))
+ try:
+ cr.execute("SELECT nextval('"+self._sequence+"')")
+ except:
+ raise except_orm(_('UserError'),
+ _('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.'))
+ id_new = cr.fetchone()[0]
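# Illustrative failure mode handled above: reporting models are commonly
# backed by SQL views rather than real tables, e.g. (assumed schema)
#   CREATE VIEW sale_report AS SELECT min(id) AS id, ... FROM sale_order ...;
# such a view has no 'sale_report_id_seq' sequence, so the nextval() call
# raises and is surfaced as the UserError.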
for table in tocreate:
if self._inherits[table] in vals:
del vals[self._inherits[table]]
else:
self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)
- columns.append((self._inherits[table], '%s', record_id))
+ upd0 += ',' + self._inherits[table]
+ upd1 += ',%s'
+ upd2.append(record_id)
#Start : Set bool fields to be False if they are not touched(to make search more powerful)
bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
if not edit:
vals.pop(field)
for field in vals:
- current_field = self._columns[field]
- if current_field._classic_write:
- columns.append((field, '%s', current_field._symbol_set[1](vals[field])))
-
+ if self._columns[field]._classic_write:
+ upd0 = upd0 + ',"' + field + '"'
+ upd1 = upd1 + ',' + self._columns[field]._symbol_set[0]
+ upd2.append(self._columns[field]._symbol_set[1](vals[field]))
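# Note on the contract used above: _symbol_set is a pair of
# (sql_placeholder, python_to_db_converter); the placeholder extends upd1 and
# the converted value lands in upd2. A hedged example for an integer column:
#   _symbol_set = ('%s', lambda x: int(x or 0))   # illustrative, not the real class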
#for the function fields that receive a value, we set them directly in the database
#(they may be required), but we also need to trigger the _fct_inv()
- if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
+ if (hasattr(self._columns[field], '_fnct_inv')) and not isinstance(self._columns[field], fields.related):
#TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
#one week of the release candidate. It seems the only good way to handle correctly this is to add an
#attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
else:
#TODO: this `if´ statement should be removed because there is no good reason to special case the fields
#related. See the above TODO comment for further explanations.
- if not isinstance(current_field, fields.related):
+ if not isinstance(self._columns[field], fields.related):
upd_todo.append(field)
if field in self._columns \
- and hasattr(current_field, 'selection') \
+ and hasattr(self._columns[field], 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
- columns.append(('create_uid', '%s', user))
- columns.append(('write_uid', '%s', user))
- columns.append(('create_date', "(now() at time zone 'UTC')"))
- columns.append(('write_date', "(now() at time zone 'UTC')"))
-
- # the list of tuples used in this formatting corresponds to
- # tuple(field_name, format, value)
- # In some case, for example (id, create_date, write_date) we does not
- # need to read the third value of the tuple, because the real value is
- # encoded in the second value (the format).
- cr.execute(
- """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
- self._table,
- ', '.join('"%s"' % f[0] for f in columns),
- ', '.join(f[1] for f in columns)
- ),
- tuple([f[2] for f in columns if len(f) > 2])
- )
-
- id_new, = cr.fetchone()
+ upd0 += ',create_uid,create_date,write_uid,write_date'
+ upd1 += ",%s,(now() at time zone 'UTC'),%s,(now() at time zone 'UTC')"
+ upd2.extend((user, user))
+ cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2))
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
if self._parent_store and not context.get('defer_parent_store_computation'):
self._validate(cr, user, [id_new], context)
if not context.get('no_store_function', False):
- result += self._store_get_values(cr, user, [id_new],
- list(set(vals.keys() + self._inherits.values())),
- context)
+ result += self._store_get_values(cr, user, [id_new], vals.keys(), context)
result.sort()
done = []
for order, model_name, ids, fields2 in result:
return browse_null()
def _store_get_values(self, cr, uid, ids, fields, context):
- """Returns an ordered list of fields.function to call due to
+ """Returns an ordered list of fields.functions to call due to
an update operation on ``fields`` of records with ``ids``,
- obtained by calling the 'store' triggers of these fields,
+ obtained by calling the 'store' functions of these fields,
as setup by their 'store' attribute.
:return: [(priority, model_name, [record_ids,], [function_fields,])]
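# Illustrative instance of the documented return shape (names/ids fabricated):
#   [(10, 'account.move.line', [1, 2, 3], ['debit', 'credit']),
#    (20, 'account.move', [7], ['amount_total'])]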
stored_functions = self.pool._store_function.get(self._name, [])
# use indexed names for the details of the stored_functions:
- model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
+ model_name_, func_field_to_compute_, id_mapping_fnct_, trigger_fields_, priority_ = range(5)
- # only keep store triggers that should be triggered for the ``fields``
+ # only keep functions that should be triggered for the ``fields``
# being written to.
- triggers_to_compute = [f for f in stored_functions \
+ to_compute = [f for f in stored_functions \
if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]
- to_compute_map = {}
- target_id_results = {}
- for store_trigger in triggers_to_compute:
- target_func_id_ = id(store_trigger[target_ids_func_])
- if not target_func_id_ in target_id_results:
- # use admin user for accessing objects having rules defined on store fields
- target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
- target_ids = target_id_results[target_func_id_]
+ mapping = {}
+ for function in to_compute:
+ # use admin user for accessing objects having rules defined on store fields
+ target_ids = [id for id in function[id_mapping_fnct_](self, cr, SUPERUSER_ID, ids, context) if id]
# the compound key must consider the priority and model name
- key = (store_trigger[priority_], store_trigger[model_name_])
+ key = (function[priority_], function[model_name_])
for target_id in target_ids:
- to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
+ mapping.setdefault(key, {}).setdefault(target_id,set()).add(tuple(function))
- # Here to_compute_map looks like:
- # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
- # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
- # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
+ # Here mapping looks like:
+ # { (10, 'model_a') : { target_id1: [ (function_1_tuple, function_2_tuple) ], ... }
+ # (20, 'model_a') : { target_id2: [ (function_3_tuple, function_4_tuple) ], ... }
+ # (99, 'model_a') : { target_id1: [ (function_5_tuple, function_6_tuple) ], ... }
# }
# Now we need to generate the batch function calls list
# call_map =
# { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
call_map = {}
- for ((priority,model), id_map) in to_compute_map.iteritems():
- trigger_ids_maps = {}
+ for ((priority,model), id_map) in mapping.iteritems():
+ functions_ids_maps = {}
# function_ids_maps =
# { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
- for target_id, triggers in id_map.iteritems():
- trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
- for triggers, target_ids in trigger_ids_maps.iteritems():
- call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
- [t[func_field_to_compute_] for t in triggers]))
+ for id, functions in id_map.iteritems():
+ functions_ids_maps.setdefault(tuple(functions), []).append(id)
+ for functions, ids in functions_ids_maps.iteritems():
+ call_map.setdefault((priority,model),[]).append((priority, model, ids,
+ [f[func_field_to_compute_] for f in functions]))
ordered_keys = call_map.keys()
ordered_keys.sort()
result = []
:param query: the current query object
"""
- if uid == SUPERUSER_ID:
- return
-
def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
""" :param string parent_model: string of the parent model
:param model child_object: model object, base of the rule application
context = {}
# avoid recursion through already copied records in case of circular relationship
- seen_map = context.setdefault('__copy_data_seen', {})
- if id in seen_map.setdefault(self._name, []):
+ seen_map = context.setdefault('__copy_data_seen',{})
+ if id in seen_map.setdefault(self._name,[]):
return
seen_map[self._name].append(id)
else:
default['state'] = self._defaults['state']
+ context_wo_lang = context.copy()
+ if 'lang' in context:
+ del context_wo_lang['lang']
+ data = self.read(cr, uid, [id,], context=context_wo_lang)
+ if data:
+ data = data[0]
+ else:
+ raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
+
# build a black list of fields that should not be copied
blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
def blacklist_given_fields(obj):
blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
else:
blacklist_given_fields(self.pool[other])
- # blacklist deprecated fields
- for name, field in obj._columns.items():
- if field.deprecated:
- blacklist.add(name)
-
blacklist_given_fields(self)
- fields_to_read = [f for f in self.check_field_access_rights(cr, uid, 'read', None)
- if f not in blacklist]
- data = self.read(cr, uid, [id], fields_to_read, context=context)
- if data:
- data = data[0]
- else:
- raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
-
res = dict(default)
for f, colinfo in self._all_columns.items():
field = colinfo.column
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
fields = self.fields_get(cr, uid, context=context)
+ translation_records = []
for field_name, field_def in fields.items():
- # removing the lang to compare untranslated values
- context_wo_lang = dict(context, lang=None)
- old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
# we must recursively copy the translations for o2o and o2m
if field_def['type'] == 'one2many':
target_obj = self.pool[field_def['relation']]
+ old_record, new_record = self.read(cr, uid, [old_id, new_id], [field_name], context=context)
# here we rely on the order of the ids to match the translations
# as foreseen in copy_data()
- old_children = sorted(r.id for r in old_record[field_name])
- new_children = sorted(r.id for r in new_record[field_name])
+ old_children = sorted(old_record[field_name])
+ new_children = sorted(new_record[field_name])
for (old_child, new_child) in zip(old_children, new_children):
target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
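# Sketch of the order-based pairing above, with fabricated ids: if
# old_children == [11, 12, 13] and new_children == [54, 55, 56], zip() maps
# 11->54, 12->55 and 13->56, which is only correct because copy_data()
# created the new children in the same order as the originals.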
# and for translatable fields we keep them for copy
elif field_def.get('translate'):
+ trans_name = ''
if field_name in self._columns:
trans_name = self._name + "," + field_name
- target_id = new_id
- source_id = old_id
elif field_name in self._inherit_fields:
trans_name = self._inherit_fields[field_name][0] + "," + field_name
- # get the id of the parent record to set the translation
- inherit_field_name = self._inherit_fields[field_name][1]
- target_id = new_record[inherit_field_name].id
- source_id = old_record[inherit_field_name].id
- else:
- continue
+ if trans_name:
+ trans_ids = trans_obj.search(cr, uid, [
+ ('name', '=', trans_name),
+ ('res_id', '=', old_id)
+ ])
+ translation_records.extend(trans_obj.read(cr, uid, trans_ids, context=context))
- trans_ids = trans_obj.search(cr, uid, [
- ('name', '=', trans_name),
- ('res_id', '=', source_id)
- ])
- user_lang = context.get('lang')
- for record in trans_obj.read(cr, uid, trans_ids, context=context):
- del record['id']
- # remove source to avoid triggering _set_src
- del record['source']
- record.update({'res_id': target_id})
- if user_lang and user_lang == record['lang']:
- # 'source' to force the call to _set_src
- # 'value' needed if value is changed in copy(), want to see the new_value
- record['source'] = old_record[field_name]
- record['value'] = new_record[field_name]
- trans_obj.create(cr, uid, record, context=context)
+ for record in translation_records:
+ del record['id']
+ record['res_id'] = new_id
+ trans_obj.create(cr, uid, record, context=context)
def copy(self, cr, uid, id, default=None, context=None):
"""
if type(ids) in (int, long):
ids = [ids]
- if not ids:
- return []
query = 'SELECT id FROM "%s"' % self._table
cr.execute(query + "WHERE ID IN %s", (tuple(ids),))
return [x[0] for x in cr.fetchall()]
return False
return True
- def _check_m2m_recursion(self, cr, uid, ids, field_name):
- """
- Verifies that there is no loop in a hierarchical structure of records,
- by following the parent relationship using the **parent** field until a loop
- is detected or until a top-level record is found.
-
- :param cr: database cursor
- :param uid: current user id
- :param ids: list of ids of records to check
- :param field_name: field to check
- :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
- """
-
- field = self._all_columns.get(field_name)
- field = field.column if field else None
- if not field or field._type != 'many2many' or field._obj != self._name:
- # field must be a many2many on itself
- raise ValueError('invalid field_name: %r' % (field_name,))
-
- query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
- ids_parent = ids[:]
- while ids_parent:
- ids_parent2 = []
- for i in range(0, len(ids_parent), cr.IN_MAX):
- j = i + cr.IN_MAX
- sub_ids_parent = ids_parent[i:j]
- cr.execute(query, (tuple(sub_ids_parent),))
- ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
- ids_parent = ids_parent2
- for i in ids_parent:
- if i in ids:
- return False
- return True
-
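# Hedged usage sketch for _check_m2m_recursion (field name and message are
# assumptions for illustration):
#   _constraints = [
#       (lambda self, cr, uid, ids, ctx=None:
#            self._check_m2m_recursion(cr, uid, ids, 'parent_ids'),
#        'Error! You cannot create recursive hierarchies.', ['parent_ids']),
#   ]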
def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID(s) of any database record.
record_ids = self.search(cr, uid, domain or [], offset, limit or False, order or False, context or {})
if not record_ids:
return []
-
- if fields and fields == ['id']:
- # shortcut read if we only want the ids
- return [{'id': id} for id in record_ids]
-
result = self.read(cr, uid, record_ids, fields or [], context or {})
# reorder read
if len(result) >= 1: