cache.setdefault(table._name, {})
self._data = cache[table._name]
- if not (id and isinstance(id, (int, long,))):
- raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
+ # if not (id and isinstance(id, (int, long,))):
+ # raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
# if not table.exists(cr, uid, id, context):
# raise BrowseRecordError(_('Object %s does not exists') % (self,))
fields.datetime: 'timestamp',
fields.binary: 'bytea',
fields.many2one: 'int4',
+ fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
for rec in cr.dictfetchall():
cols[rec['name']] = rec
- for (k, f) in self._columns.items():
+ ir_model_fields_obj = self.pool.get('ir.model.fields')
+
+ # sparse field should be created at the end, as it depends on its serialized field already existing
+ model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
+ for (k, f) in model_fields:
vals = {
'model_id': model_id,
'model': self._name,
'selectable': (f.selectable and 1) or 0,
'translate': (f.translate and 1) or 0,
'relation_field': (f._type=='one2many' and isinstance(f, fields.one2many)) and f._fields_id or '',
+ 'serialization_field_id': None,
}
+ if getattr(f, 'serialization_field', None):
+ # resolve link to serialization_field if specified by name
+ serialization_field_id = ir_model_fields_obj.search(cr, 1, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
+ if not serialization_field_id:
+ raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
+ vals['serialization_field_id'] = serialization_field_id[0]
+
# When its a custom field,it does not contain f.select
if context.get('field_state', 'base') == 'manual':
if context.get('field_name', '') == k:
vals['id'] = id
cr.execute("""INSERT INTO ir_model_fields (
id, model_id, model, name, field_description, ttype,
- relation,view_load,state,select_level,relation_field, translate
+ relation,view_load,state,select_level,relation_field, translate, serialization_field_id
) VALUES (
- %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
+ %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
)""", (
id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
vals['relation'], bool(vals['view_load']), 'base',
- vals['select_level'], vals['relation_field'], bool(vals['translate'])
+ vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
))
if 'module' in context:
name1 = 'field_' + self._table + '_' + k
cr.commit()
cr.execute("""UPDATE ir_model_fields SET
model_id=%s, field_description=%s, ttype=%s, relation=%s,
- view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s
+ view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
WHERE
model=%s AND name=%s""", (
vals['model_id'], vals['field_description'], vals['ttype'],
vals['relation'], bool(vals['view_load']),
- vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['model'], vals['name']
+ vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
))
break
cr.commit()
#'select': int(field['select_level'])
}
- if field['ttype'] == 'selection':
+ if field['serialization_field_id']:
+ cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
+ attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
+ if field['ttype'] in ['many2one', 'one2many', 'many2many']:
+ attrs.update({'relation': field['relation']})
+ self._columns[field['name']] = fields.sparse(**attrs)
+ elif field['ttype'] == 'selection':
self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
elif field['ttype'] == 'reference':
self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
else:
return False
+def _get_xml_id(self, cr, uid, r):
+    """Return an XML id string identifying browse record ``r`` for export.
+
+    If an ir.model.data entry already exists for the record, reuse it and
+    return its fully-qualified name ('module.name', or just 'name' when the
+    entry has no module).  Otherwise create a new entry under the reserved
+    '__export__' module and return '__export__.<generated name>'.
+    """
+    model_data = self.pool.get('ir.model.data')
+    data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
+    if len(data_ids):
+        # An external id already exists for this record: reuse it.
+        d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
+        if d['module']:
+            r = '%s.%s' % (d['module'], d['name'])
+        else:
+            r = d['name']
+    else:
+        # No external id yet: generate a unique name of the form
+        # <table>_<id>[_<postfix>], bumping the postfix until it is free.
+        postfix = 0
+        while True:
+            n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
+            if not model_data.search(cr, uid, [('name', '=', n)]):
+                break
+            postfix += 1
+        model_data.create(cr, uid, {
+            'name': n,
+            'model': self._name,
+            'res_id': r['id'],
+            'module': '__export__',
+        })
+        r = '__export__.'+n
+    return r
+
+
lines = []
data = map(lambda x: '', range(len(fields)))
done = []
r = row
i = 0
while i < len(f):
+ cols = False
if f[i] == '.id':
r = r['id']
elif f[i] == 'id':
- model_data = self.pool.get('ir.model.data')
- data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
- if len(data_ids):
- d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
- if d['module']:
- r = '%s.%s' % (d['module'], d['name'])
- else:
- r = d['name']
- else:
- postfix = 0
- while True:
- n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
- if not model_data.search(cr, uid, [('name', '=', n)]):
- break
- postfix += 1
- model_data.create(cr, uid, {
- 'name': n,
- 'model': self._name,
- 'res_id': r['id'],
- 'module': '__export__',
- })
- r = n
+ r = _get_xml_id(self, cr, uid, r)
else:
r = r[f[i]]
# To display external name of selection field when its exported
- cols = False
if f[i] in self._columns.keys():
cols = self._columns[f[i]]
elif f[i] in self._inherit_fields.keys():
if [x for x in fields2 if x]:
break
done.append(fields2)
+ if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
+ data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
+ break
+
for row2 in r:
- lines2 = self.__export_row(cr, uid, row2, fields2,
+ lines2 = row2._model.__export_row(cr, uid, row2, fields2,
context)
if first:
for fpos2 in range(len(fields)):
newfd = relation_obj.fields_get( cr, uid, context=context )
pos = position
- res = many_ids(line[i], relation, current_module, mode)
+ res = []
first = 0
while pos < len(datas):
warning += w2
first += 1
- if data_res_id2:
- res.append((4, data_res_id2))
-
if (not newrow) or not reduce(lambda x, y: x or y, newrow.values(), 0):
break
group_count = group_by = groupby
if groupby:
if fget.get(groupby):
- if fget[groupby]['type'] in ('date', 'datetime'):
- flist = "to_char(%s,'yyyy-mm') as %s " % (qualified_groupby_field, groupby)
- groupby = "to_char(%s,'yyyy-mm')" % (qualified_groupby_field)
- qualified_groupby_field = groupby
+ groupby_type = fget[groupby]['type']
+ if groupby_type in ('date', 'datetime'):
+ qualified_groupby_field = "to_char(%s,'yyyy-mm')" % qualified_groupby_field
+ flist = "%s as %s " % (qualified_groupby_field, groupby)
+ elif groupby_type == 'boolean':
+ qualified_groupby_field = "coalesce(%s,false)" % qualified_groupby_field
+ flist = "%s as %s " % (qualified_groupby_field, groupby)
else:
flist = qualified_groupby_field
else:
self.check_unlink(cr, uid)
- properties = self.pool.get('ir.property')
+ ir_property = self.pool.get('ir.property')
+
+ # Check if the records are used as default properties.
domain = [('res_id', '=', False),
('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
]
- if properties.search(cr, uid, domain, context=context):
+ if ir_property.search(cr, uid, domain, context=context):
raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
+ # Delete the records' properties.
+ property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
+ ir_property.unlink(cr, uid, property_ids, context=context)
+
wf_service = netsvc.LocalService("workflow")
for oid in ids:
wf_service.trg_delete(uid, self._name, oid, cr)
if readonly[0][0] >= 1:
edit = True
break
- elif readonly[0][0] == 0:
- edit = False
- else:
- edit = False
if not edit:
vals.pop(field)
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+# OpenERP, Open Source Management Solution
+# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+##############################################################################
-import os
-import re
-import sys
-from setuptools import setup, find_packages
+import glob, os, re, setuptools, sys
+from os.path import join, isfile
-execfile('addons/web/common/release.py')
+# List all data files
+def data():
+    """Return the data_files list for setup(): (directory, [files]) pairs."""
+    # Collect every file under 'openerp', skipping compiled and backup
+    # files (*.pyc, *.pyo, *~).
+    files = []
+    for root, dirnames, filenames in os.walk('openerp'):
+        for filename in filenames:
+            if not re.match(r'.*(\.pyc|\.pyo|\~)$',filename):
+                files.append(os.path.join(root, filename))
+    # Group the collected paths by directory, the shape expected by the
+    # setuptools data_files keyword.
+    d = {}
+    for v in files:
+        k=os.path.dirname(v)
+        if k in d:
+            d[k].append(v)
+        else:
+            d[k]=[v]
+    r = d.items()
++    # On Windows, also ship the Visual C++ 2008 (VC90) runtime files
++    # alongside the py2exe build.
++    if os.name == 'nt':
++        r.append(("Microsoft.VC90.CRT", glob.glob('C:\Microsoft.VC90.CRT\*.*')))
+    return r
-version_dash_incompatible = False
-if 'bdist_rpm' in sys.argv:
- version_dash_incompatible = True
-try:
- import py2exe
- from py2exe_utils import opts
- version_dash_incompatible = True
-except ImportError:
- opts = {}
-if version_dash_incompatible:
- version = version.split('-')[0]
+def gen_manifest():
+ file_list="\n".join(data())
+ open('MANIFEST','w').write(file_list)
-FILE_PATTERNS = \
- r'.+\.(py|cfg|po|pot|mo|txt|rst|gif|png|jpg|ico|mako|html|js|css|htc|swf)$'
-def find_data_files(source, patterns=FILE_PATTERNS):
- file_matcher = re.compile(patterns, re.I)
- out = []
- for base, _, files in os.walk(source):
- cur_files = []
- for f in files:
- if file_matcher.match(f):
- cur_files.append(os.path.join(base, f))
- if cur_files:
- out.append(
- (base, cur_files))
++# NOTE(review): presumably added so the build can locate the VC90 runtime
++# DLLs when packaging on Windows -- confirm.
++if os.name == 'nt':
++    sys.path.append("C:\Microsoft.VC90.CRT")
+
-    return out
+def py2exe_options():
+    # Extra setup() keyword arguments for building the Windows executable
+    # with py2exe; returns an empty dict on non-Windows platforms so the
+    # call site can always splat the result.
+    if os.name == 'nt':
+        import py2exe
+        return {
-            "console" : [ { "script": "openerp-server", "icon_resources": [(1, join("pixmaps","openerp-icon.ico"))], }],
++            "console" : [ { "script": "openerp-server", "icon_resources": [(1, join("install","openerp-icon.ico"))], }],
+            'options' : {
+                "py2exe": {
+                    # skip_archive keeps modules as individual files instead
+                    # of a zip archive; optimize=2 byte-compiles with -OO.
+                    "skip_archive": 1,
+                    "optimize": 2,
+                    "dist_dir": 'dist',
-                    "packages": [ "DAV", "HTMLParser", "PIL", "asynchat", "asyncore", "commands", "dateutil", "decimal", "email", "encodings", "imaplib", "lxml", "lxml._elementpath", "lxml.builder", "lxml.etree", "lxml.objectify", "mako", "openerp", "poplib", "pychart", "pydot", "pyparsing", "reportlab", "select", "simplejson", "smtplib", "uuid", "vatnumber" "vobject", "xml", "xml", "xml.dom", "xml.xpath", "yaml", ],
++                    # Packages py2exe must bundle explicitly (fixes the
++                    # missing comma after "vatnumber" in the earlier list).
++                    "packages": [ "DAV", "HTMLParser", "PIL", "asynchat", "asyncore", "commands", "dateutil", "decimal", "email", "encodings", "imaplib", "lxml", "lxml._elementpath", "lxml.builder", "lxml.etree", "lxml.objectify", "mako", "openerp", "poplib", "pychart", "pydot", "pyparsing", "reportlab", "select", "simplejson", "smtplib", "uuid", "vatnumber", "vobject", "xml", "xml.dom", "yaml", ],
+                    "excludes" : ["Tkconstants","Tkinter","tcl"],
+                }
+            }
+        }
+    else:
+        return {}
-setup(
-    name=name,
-    version=version,
-    description=description,
-    long_description=long_description,
-    author=author,
-    author_email=author_email,
-    url=url,
-    download_url=download_url,
-    license=license,
-    install_requires=[
-        "Babel >= 0.9.6",
-        "simplejson >= 2.0.9",
-        "python-dateutil >= 1.4.1, < 2",
-        "pytz",
-        "werkzeug == 0.7",
-    ],
-    tests_require=[
-        'unittest2',
-        'mock',
-    ],
-    test_suite = 'unittest2.collector',
-    zip_safe=False,
-    packages=find_packages(),
-    classifiers=[
-        'Development Status :: 6 - Production/Stable',
-        'Operating System :: OS Independent',
-        'Programming Language :: Python',
-        'Environment :: Web Environment',
-        'Topic :: Office/Business :: Financial',
-    ],
-    scripts=['openerp-web'],
-    data_files=(find_data_files('addons')
-                + opts.pop('data_files', [])
-                ),
-    **opts
+# Load the release metadata (version, description, author, classifiers, ...)
+# into module globals for the setup() call below.
+execfile(join(os.path.dirname(__file__), 'openerp', 'release.py'))
+
+setuptools.setup(
+    name = 'openerp',
+    version = version,
+    description = description,
+    long_description = long_desc,
+    url = url,
+    author = author,
+    author_email = author_email,
+    classifiers = filter(None, classifiers.split("\n")),
+    license = license,
+    scripts = ['openerp-server'],
+    data_files = data(),
+    packages = setuptools.find_packages(),
+    #include_package_data = True,
+    install_requires = [
+        # TODO the pychart package we include in openerp corresponds to PyChart 1.37.
+        # It seems there is a single difference, which is a spurious print in generate_docs.py.
+        # It is probably safe to move to PyChart 1.39 (the latest one).
+        # (Let setup.py choose the latest one, and we should check we can remove pychart from
+        # our tree.) http://download.gna.org/pychart/
+        # TODO 'pychart',
+        'babel',
+        'feedparser',
+        'gdata',
+        'lxml',
+        'mako',
+        'psycopg2',
+        'pydot',
-        'python-dateutil',
++        # dateutil 2.x is Python 3 only; pin below 2 for this code base.
++        'python-dateutil < 2',
+        'python-ldap',
+        'python-openid',
+        'pytz',
+        'pywebdav',
+        'pyyaml',
+        'reportlab',
+        'simplejson',
+        'vatnumber',
+        'vobject',
+        'werkzeug',
+        'zsi',
+    ],
+    extras_require = {
+        'SSL' : ['pyopenssl'],
+    },
+    # Windows-only py2exe keyword arguments (empty dict on other platforms).
+    **py2exe_options()
)
+
+
+# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: