- def check_access_rule(self, cr, uid, ids, operation, context=None):
- """Verifies that the operation given by ``operation`` is allowed for the user
- according to ir.rules.
-
- :param operation: one of ``write``, ``unlink``
- :raise except_orm: * if current ir.rules do not permit this operation.
- :return: None if the operation is allowed
- """
- raise NotImplementedError(_('The check_access_rule method is not implemented on this object !'))
-
class orm_memory(orm_template):
    # In-memory (non database-backed) model: records live in ``self.datas``
    # keyed by integer id, and are expired periodically by vaccum().

    # RPC methods shielded from unrestricted public access.
    _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
    _inherit_fields = {}  # osv_memory objects do not inherit fields
    _max_count = None     # max number of live records before LRU eviction (set in __init__)
    _max_hours = None     # max record age in hours before expiry (set in __init__)
    _check_time = 20      # vaccum() does real work once out of this many calls
-
- @classmethod
- def createInstance(cls, pool, cr):
- return cls.makeInstance(pool, cr, ['_columns', '_defaults'])
-
- def __init__(self, pool, cr):
- super(orm_memory, self).__init__(pool, cr)
- self.datas = {}
- self.next_id = 0
- self.check_id = 0
- self._max_count = config.get('osv_memory_count_limit')
- self._max_hours = config.get('osv_memory_age_limit')
- cr.execute('delete from wkf_instance where res_type=%s', (self._name,))
-
- def _check_access(self, uid, object_id, mode):
- if uid != 1 and self.datas[object_id]['internal.create_uid'] != uid:
- raise except_orm(_('AccessError'), '%s access is only allowed on your own records for osv_memory objects except for the super-user' % mode.capitalize())
-
- def vaccum(self, cr, uid, force=False):
- """Run the vaccuum cleaning system, expiring and removing old records from the
- virtual osv_memory tables if the "max count" or "max age" conditions are enabled
- and have been reached. This method can be called very often (e.g. everytime a record
- is created), but will only actually trigger the cleanup process once out of
- "_check_time" times (by default once out of 20 calls)."""
- self.check_id += 1
- if (not force) and (self.check_id % self._check_time):
- return True
- tounlink = []
-
- # Age-based expiration
- if self._max_hours:
- max = time.time() - self._max_hours * 60 * 60
- for k,v in self.datas.iteritems():
- if v['internal.date_access'] < max:
- tounlink.append(k)
- self.unlink(cr, ROOT_USER_ID, tounlink)
-
- # Count-based expiration
- if self._max_count and len(self.datas) > self._max_count:
- # sort by access time to remove only the first/oldest ones in LRU fashion
- records = self.datas.items()
- records.sort(key=lambda x:x[1]['internal.date_access'])
- self.unlink(cr, ROOT_USER_ID, [x[0] for x in records[:len(self.datas)-self._max_count]])
-
- return True
-
- def read(self, cr, user, ids, fields_to_read=None, context=None, load='_classic_read'):
- if not context:
- context = {}
- if not fields_to_read:
- fields_to_read = self._columns.keys()
- result = []
- if self.datas:
- ids_orig = ids
- if isinstance(ids, (int, long)):
- ids = [ids]
- for id in ids:
- r = {'id': id}
- for f in fields_to_read:
- record = self.datas.get(id)
- if record:
- self._check_access(user, id, 'read')
- r[f] = record.get(f, False)
- if r[f] and isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
- r[f] = len(r[f])
- result.append(r)
- if id in self.datas:
- self.datas[id]['internal.date_access'] = time.time()
- fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)
- for f in fields_post:
- res2 = self._columns[f].get_memory(cr, self, ids, f, user, context=context, values=result)
- for record in result:
- record[f] = res2[record['id']]
- if isinstance(ids_orig, (int, long)):
- return result[0]
- return result
-
- def write(self, cr, user, ids, vals, context=None):
- if not ids:
- return True
- vals2 = {}
- upd_todo = []
- for field in vals:
- if self._columns[field]._classic_write:
- vals2[field] = vals[field]
- else:
- upd_todo.append(field)
- for object_id in ids:
- self._check_access(user, object_id, mode='write')
- self.datas[object_id].update(vals2)
- self.datas[object_id]['internal.date_access'] = time.time()
- for field in upd_todo:
- self._columns[field].set_memory(cr, self, object_id, field, vals[field], user, context)
- self._validate(cr, user, [object_id], context)
- wf_service = netsvc.LocalService("workflow")
- wf_service.trg_write(user, self._name, object_id, cr)
- return object_id
-
- def create(self, cr, user, vals, context=None):
- self.vaccum(cr, user)
- self.next_id += 1
- id_new = self.next_id
-
- vals = self._add_missing_default_values(cr, user, vals, context)
-
- vals2 = {}
- upd_todo = []
- for field in vals:
- if self._columns[field]._classic_write:
- vals2[field] = vals[field]
- else:
- upd_todo.append(field)
- self.datas[id_new] = vals2
- self.datas[id_new]['internal.date_access'] = time.time()
- self.datas[id_new]['internal.create_uid'] = user
-
- for field in upd_todo:
- self._columns[field].set_memory(cr, self, id_new, field, vals[field], user, context)
- self._validate(cr, user, [id_new], context)
- if self._log_create and not (context and context.get('no_store_function', False)):
- message = self._description + \
- " '" + \
- self.name_get(cr, user, [id_new], context=context)[0][1] + \
- "' "+ _("created.")
- self.log(cr, user, id_new, message, True, context=context)
- wf_service = netsvc.LocalService("workflow")
- wf_service.trg_create(user, self._name, id_new, cr)
- return id_new
-
- def _where_calc(self, cr, user, args, active_test=True, context=None):
- if not context:
- context = {}
- args = args[:]
- res = []
- # if the object has a field named 'active', filter out all inactive
- # records unless they were explicitely asked for
- if 'active' in self._columns and (active_test and context.get('active_test', True)):
- if args:
- active_in_args = False
- for a in args:
- if a[0] == 'active':
- active_in_args = True
- if not active_in_args:
- args.insert(0, ('active', '=', 1))
- else:
- args = [('active', '=', 1)]
- if args:
- import expression
- e = expression.expression(args)
- e.parse(cr, user, self, context)
- res = e.exp
- return res or []