diff --git a/openerp/tools/translate.py b/openerp/tools/translate.py
index 5238073..4eb6b39 100644
--- a/openerp/tools/translate.py
+++ b/openerp/tools/translate.py
@@ -23,16 +23,15 @@
 import codecs
 import csv
 import fnmatch
 import inspect
-import itertools
 import locale
 import os
-import openerp.pooler as pooler
 import openerp.sql_db as sql_db
 import re
 import logging
 import tarfile
 import tempfile
 import threading
+from babel.messages import extract
 from os.path import join
 
 from datetime import datetime
@@ -40,9 +39,17 @@ from lxml import etree
 
 import config
 import misc
-from misc import UpdateableStr
 from misc import SKIPPED_ELEMENT_TYPES
 import osutil
+import openerp
+from openerp import SUPERUSER_ID
+
+_logger = logging.getLogger(__name__)
+
+# used to notify web client that these translations should be loaded in the UI
+WEB_TRANSLATION_COMMENT = "openerp-web"
+
+SKIPPED_ELEMENTS = ('script', 'style')
 
 _LOCALE2WIN32 = {
     'af_ZA': 'Afrikaans_South Africa',
@@ -50,7 +57,7 @@ _LOCALE2WIN32 = {
     'ar_SA': 'Arabic_Saudi Arabia',
     'eu_ES': 'Basque_Spain',
     'be_BY': 'Belarusian_Belarus',
-    'bs_BA': 'Serbian (Latin)',
+    'bs_BA': 'Bosnian_Bosnia and Herzegovina',
     'bg_BG': 'Bulgarian_Bulgaria',
     'ca_ES': 'Catalan_Spain',
     'hr_HR': 'Croatian_Croatia',
@@ -87,7 +94,6 @@ _LOCALE2WIN32 = {
     'lt_LT': 'Lithuanian_Lithuania',
     'lat': 'Latvian_Latvia',
     'ml_IN': 'Malayalam_India',
-    'id_ID': 'Indonesian_indonesia',
     'mi_NZ': 'Maori',
     'mn': 'Cyrillic_Mongolian',
     'no_NO': 'Norwegian_Norway',
@@ -97,7 +103,6 @@ _LOCALE2WIN32 = {
     'pt_BR': 'Portuguese_Brazil',
     'ro_RO': 'Romanian_Romania',
     'ru_RU': 'Russian_Russia',
-    'mi_NZ': 'Maori',
     'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
     'sk_SK': 'Slovak_Slovakia',
     'sl_SI': 'Slovenian_Slovenia',
@@ -125,7 +130,6 @@ _LOCALE2WIN32 = {
     'sv_SE': 'Swedish_Sweden',
     'ta_IN': 'English_Australia',
     'th_TH': 'Thai_Thailand',
-    'mi_NZ': 'Maori',
     'tr_TR': 'Turkish_Turkey',
     'uk_UA': 'Ukrainian_Ukraine',
     'vi_VN': 'Vietnamese_Viet Nam',
@@ -153,8 +157,6 @@ def translate(cr, name, source_type, lang, source=None):
     res = res_trans and res_trans[0] or False
     return res
 
-logger = logging.getLogger('translate')
-
 class GettextAlias(object):
 
     def _get_db(self):
@@ -163,38 +165,60 @@ class GettextAlias(object):
         if db_name:
             return sql_db.db_connect(db_name)
 
-    def _get_cr(self, frame):
-        is_new_cr = False
-        cr = frame.f_locals.get('cr', frame.f_locals.get('cursor'))
-        if not cr:
-            s = frame.f_locals.get('self', {})
-            cr = getattr(s, 'cr', None)
-        if not cr:
+    def _get_cr(self, frame, allow_create=True):
+        # try, in order: cr, cursor, self.env.cr, self.cr
+        if 'cr' in frame.f_locals:
+            return frame.f_locals['cr'], False
+        if 'cursor' in frame.f_locals:
+            return frame.f_locals['cursor'], False
+        s = frame.f_locals.get('self')
+        if hasattr(s, 'env'):
+            return s.env.cr, False
+        if hasattr(s, 'cr'):
+            return s.cr, False
+        if allow_create:
+            # create a new cursor
             db = self._get_db()
-            if db:
-                cr = db.cursor()
-                is_new_cr = True
-        return cr, is_new_cr
+            if db is not None:
+                return db.cursor(), True
+        return None, False
+
+    def _get_uid(self, frame):
+        # try, in order: uid, user, self.env.uid
+        if 'uid' in frame.f_locals:
+            return frame.f_locals['uid']
+        if 'user' in frame.f_locals:
+            return int(frame.f_locals['user'])  # user may be a record
+        s = frame.f_locals.get('self')
+        return s.env.uid
 
     def _get_lang(self, frame):
+        # try, in order: context.get('lang'), kwargs['context'].get('lang'),
+        # self.env.lang, self.localcontext.get('lang')
         lang = None
-        ctx = frame.f_locals.get('context')
-        if not ctx:
-            kwargs = frame.f_locals.get('kwargs')
-            if kwargs is None:
-                args = frame.f_locals.get('args')
-                if args and isinstance(args, (list, tuple)) \
-                        and isinstance(args[-1], dict):
-                    ctx = args[-1]
-            elif isinstance(kwargs, dict):
-                ctx = kwargs.get('context')
-        if ctx:
-            lang = ctx.get('lang')
+        if frame.f_locals.get('context'):
+            lang = frame.f_locals['context'].get('lang')
+        if not lang:
+            kwargs = frame.f_locals.get('kwargs', {})
+            if kwargs.get('context'):
+                lang = kwargs['context'].get('lang')
         if not lang:
-            s = frame.f_locals.get('self', {})
-            c = getattr(s, 'localcontext', None)
-            if c:
-                lang = c.get('lang')
+            s = frame.f_locals.get('self')
+            if hasattr(s, 'env'):
+                lang = s.env.lang
+            if not lang:
+                if hasattr(s, 'localcontext'):
+                    lang = s.localcontext.get('lang')
+            if not lang:
+                # Last resort: attempt to guess the language of the user
+                # Pitfall: some operations are performed in sudo mode, and we
+                #          don't know the original uid, so the language may
+                #          be wrong when the admin language differs.
+                pool = getattr(s, 'pool', None)
+                (cr, dummy) = self._get_cr(frame, allow_create=False)
+                uid = self._get_uid(frame)
+                if pool and cr and uid:
+                    lang = pool['res.users'].context_get(cr, uid)['lang']
         return lang
 
     def __call__(self, source):
@@ -213,14 +237,14 @@ class GettextAlias(object):
                 cr, is_new_cr = self._get_cr(frame)
                 if cr:
                     # Try to use ir.translation to benefit from global cache if possible
-                    pool = pooler.get_pool(cr.dbname)
-                    res = pool.get('ir.translation')._get_source(cr, 1, None, ('code','sql_constraint'), lang, source)
+                    registry = openerp.registry(cr.dbname)
+                    res = registry['ir.translation']._get_source(cr, SUPERUSER_ID, None, ('code','sql_constraint'), lang, source)
                 else:
-                    logger.debug('no context cursor detected, skipping translation for "%r"', source)
+                    _logger.debug('no context cursor detected, skipping translation for "%r"', source)
             else:
-                logger.debug('no translation language detected, skipping translation for "%r" ', source)
+                _logger.debug('no translation language detected, skipping translation for "%r" ', source)
         except Exception:
-            logger.debug('translation went wrong for "%r", skipped', source)
+            _logger.debug('translation went wrong for "%r", skipped', source)
             # if so, double-check the root/base translations filenames
         finally:
             if cr and is_new_cr:
@@ -250,19 +274,18 @@ def unquote(str):
 # class to handle po files
 class TinyPoFile(object):
     def __init__(self, buffer):
-        self.logger = logging.getLogger('i18n')
         self.buffer = buffer
 
     def warn(self, msg, *args):
-        self.logger.warning(msg, *args)
+        _logger.warning(msg, *args)
 
     def __iter__(self):
        self.buffer.seek(0)
        self.lines = self._get_lines()
-        self.lines_count = len(self.lines);
+        self.lines_count = len(self.lines)
 
        self.first = True
-        self.tnrs= []
+        self.extra_lines = []
        return self
 
     def _get_lines(self):
@@ -275,32 +298,44 @@ class TinyPoFile(object):
         return lines
 
     def cur_line(self):
-        return (self.lines_count - len(self.lines))
+        return self.lines_count - len(self.lines)
 
     def next(self):
-        type = name = res_id = source = trad = None
-
-        if self.tnrs:
-            type, name, res_id, source, trad = self.tnrs.pop(0)
+        trans_type = name = res_id = source = trad = None
+        if self.extra_lines:
+            trans_type, name, res_id, source, trad, comments = \
+                self.extra_lines.pop(0)
             if not res_id:
                 res_id = '0'
         else:
-            tmp_tnrs = []
+            comments = []
+            targets = []
             line = None
             fuzzy = False
-            while (not line):
+            while not line:
                 if 0 == len(self.lines):
                     raise StopIteration()
                 line = self.lines.pop(0).strip()
             while line.startswith('#'):
                 if line.startswith('#~ '):
                     break
-                if line.startswith('#:'):
-                    if ' ' in line[2:].strip():
-                        for lpart in line[2:].strip().split(' '):
-                            tmp_tnrs.append(lpart.strip().split(':',2))
-                    else:
-                        tmp_tnrs.append( line[2:].strip().split(':',2) )
+                if line.startswith('#.'):
+                    line = line[2:].strip()
+                    if not line.startswith('module:'):
+                        comments.append(line)
+                elif line.startswith('#:'):
+                    # Process the `reference` comments. Each line can specify
+                    # multiple targets (e.g. model, view, code, selection,
+                    # ...). For each target, we will return an additional
+                    # entry.
+                    for lpart in line[2:].strip().split(' '):
+                        trans_info = lpart.strip().split(':',2)
+                        if trans_info and len(trans_info) == 2:
+                            # looks like the translation trans_type is missing, which is not
+                            # unexpected because it is not a GetText standard. Default: 'code'
+                            trans_info[:0] = ['code']
+                        if trans_info and len(trans_info) == 3:
+                            # this is a ref line holding the destination info (model, field, record)
+                            targets.append(trans_info)
                 elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
                     fuzzy = True
                 line = self.lines.pop(0).strip()
@@ -323,7 +358,7 @@ class TinyPoFile(object):
             # if the source is "" and it's the first msgid, it's the special
             # msgstr with the informations about the traduction and the
             # traductor; we skip it
-            self.tnrs = []
+            self.extra_lines = []
             while line:
                 line = self.lines.pop(0).strip()
             return self.next()
@@ -340,10 +375,14 @@ class TinyPoFile(object):
             trad += unquote(line)
             line = self.lines.pop(0).strip()
 
-        if tmp_tnrs and not fuzzy:
-            type, name, res_id = tmp_tnrs.pop(0)
-            for t, n, r in tmp_tnrs:
-                self.tnrs.append((t, n, r, source, trad))
+        if targets and not fuzzy:
+            # Use the first target for the current entry (returned at the
+            # end of this next() call), and keep the others to generate
+            # additional entries (returned the next next() calls).
+            trans_type, name, res_id = targets.pop(0)
+            for t, n, r in targets:
+                if t == trans_type == 'code': continue
+                self.extra_lines.append((t, n, r, source, trad, comments))
 
         self.first = False
 
@@ -352,7 +391,7 @@ class TinyPoFile(object):
             self.warn('Missing "#:" formated comment at line %d for the following source:\n\t%s',
                       self.cur_line(), source[:30])
             return self.next()
-        return type, name, res_id, source, trad
+        return trans_type, name, res_id, source, trad, '\n'.join(comments)
 
     def write_infos(self, modules):
         import openerp.release as release
@@ -381,11 +420,13 @@ class TinyPoFile(object):
             }
                           )
 
-    def write(self, modules, tnrs, source, trad):
+    def write(self, modules, tnrs, source, trad, comments=None):
 
         plurial = len(modules) > 1 and 's' or ''
         self.buffer.write("#. module%s: %s\n" % (plurial, ', '.join(modules)))
 
+        if comments:
+            self.buffer.write(''.join(('#. \
%s\n' % c for c in comments))) code = False for typy, name, res_id in tnrs: @@ -412,44 +453,48 @@ class TinyPoFile(object): def trans_export(lang, modules, buffer, format, cr): - def _process(format, modules, rows, buffer, lang, newlang): + def _process(format, modules, rows, buffer, lang): if format == 'csv': - writer=csv.writer(buffer, 'UNIX') - for row in rows: - writer.writerow(row) + writer = csv.writer(buffer, 'UNIX') + # write header first + writer.writerow(("module","type","name","res_id","src","value")) + for module, type, name, res_id, src, trad, comments in rows: + # Comments are ignored by the CSV writer + writer.writerow((module, type, name, res_id, src, trad)) elif format == 'po': - rows.pop(0) writer = TinyPoFile(buffer) writer.write_infos(modules) # we now group the translations by source. That means one translation per source. grouped_rows = {} - for module, type, name, res_id, src, trad in rows: + for module, type, name, res_id, src, trad, comments in rows: row = grouped_rows.setdefault(src, {}) row.setdefault('modules', set()).add(module) - if ('translation' not in row) or (not row['translation']): + if not row.get('translation') and trad != src: row['translation'] = trad row.setdefault('tnrs', []).append((type, name, res_id)) + row.setdefault('comments', set()).update(comments) - for src, row in grouped_rows.items(): - writer.write(row['modules'], row['tnrs'], src, row['translation']) + for src, row in sorted(grouped_rows.items()): + if not lang: + # translation template, so no translation value + row['translation'] = '' + elif not row.get('translation'): + row['translation'] = src + writer.write(row['modules'], row['tnrs'], src, row['translation'], row['comments']) elif format == 'tgz': - rows.pop(0) rows_by_module = {} for row in rows: module = row[0] - # first row is the "header", as in csv, it will be popped - rows_by_module.setdefault(module, [['module', 'type', 'name', 'res_id', 'src', ''],]) - rows_by_module[module].append(row) - + rows_by_module.setdefault(module, []).append(row) tmpdir = tempfile.mkdtemp() for mod, modrows in rows_by_module.items(): tmpmoddir = join(tmpdir, mod, 'i18n') os.makedirs(tmpmoddir) - pofilename = (newlang and mod or lang) + ".po" + (newlang and 't' or '') + pofilename = (lang if lang else mod) + ".po" + ('t' if not lang else '') buf = file(join(tmpmoddir, pofilename), 'w') - _process('po', [mod], modrows, buf, lang, newlang) + _process('po', [mod], modrows, buf, lang) buf.close() tar = tarfile.open(fileobj=buffer, mode='w|gz') @@ -460,28 +505,31 @@ def trans_export(lang, modules, buffer, format, cr): raise Exception(_('Unrecognized extension: must be one of ' '.csv, .po, or .tgz (received .%s).' 
% format)) - newlang = not bool(lang) - if newlang: - lang = 'en_US' - trans = trans_generate(lang, modules, cr) - if newlang and format!='csv': - for trx in trans: - trx[-1] = '' - modules = set([t[0] for t in trans[1:]]) - _process(format, modules, trans, buffer, lang, newlang) - del trans + translations = trans_generate(lang, modules, cr) + modules = set(t[0] for t in translations) + _process(format, modules, translations, buffer, lang) + del translations def trans_parse_xsl(de): + return list(set(trans_parse_xsl_aux(de, False))) + +def trans_parse_xsl_aux(de, t): res = [] + for n in de: - if n.get("t"): - for m in n: - if isinstance(m, SKIPPED_ELEMENT_TYPES) or not m.text: + t = t or n.get("t") + if t: + if isinstance(n, SKIPPED_ELEMENT_TYPES) or n.tag.startswith('{http://www.w3.org/1999/XSL/Transform}'): continue - l = m.text.strip().replace('\n',' ') - if len(l): - res.append(l.encode("utf8")) - res.extend(trans_parse_xsl(n)) + if n.text: + l = n.text.strip().replace('\n',' ') + if len(l): + res.append(l.encode("utf8")) + if n.tail: + l = n.tail.strip().replace('\n',' ') + if len(l): + res.append(l.encode("utf8")) + res.extend(trans_parse_xsl_aux(n, t)) return res def trans_parse_rml(de): @@ -497,22 +545,33 @@ def trans_parse_rml(de): res.extend(trans_parse_rml(n)) return res -def trans_parse_view(de): - res = [] - if de.tag == 'attribute' and de.get("name") == 'string': - if de.text: - res.append(de.text.encode("utf8")) - if de.get("string"): - res.append(de.get('string').encode("utf8")) - if de.get("help"): - res.append(de.get('help').encode("utf8")) - if de.get("sum"): - res.append(de.get('sum').encode("utf8")) - if de.get("confirm"): - res.append(de.get('confirm').encode("utf8")) - for n in de: - res.extend(trans_parse_view(n)) - return res +def _push(callback, term, source_line): + """ Sanity check before pushing translation terms """ + term = (term or "").strip().encode('utf8') + # Avoid non-char tokens like ':' '...' '.00' etc. + if len(term) > 8 or any(x.isalpha() for x in term): + callback(term, source_line) + +def trans_parse_view(element, callback): + """ Helper method to recursively walk an etree document representing a + regular view and call ``callback(term)`` for each translatable term + that is found in the document. + + :param ElementTree element: root of etree document to extract terms from + :param callable callback: a callable in the form ``f(term, source_line)``, + that will be called for each extracted term. + """ + for el in element.iter(): + if (not isinstance(el, SKIPPED_ELEMENT_TYPES) + and el.tag.lower() not in SKIPPED_ELEMENTS + and el.text): + _push(callback, el.text, el.sourceline) + if el.tail: + _push(callback, el.tail, el.sourceline) + for attr in ('string', 'help', 'sum', 'confirm', 'placeholder'): + value = el.get(attr) + if value: + _push(callback, value, el.sourceline) # tests whether an object is in a list of modules def in_modules(object_name, modules): @@ -528,15 +587,58 @@ def in_modules(object_name, modules): module = module_dict.get(module, module) return module in modules +def _extract_translatable_qweb_terms(element, callback): + """ Helper method to walk an etree document representing + a QWeb template, and call ``callback(term)`` for each + translatable term that is found in the document. + + :param etree._Element element: root of etree document to extract terms from + :param Callable callback: a callable in the form ``f(term, source_line)``, + that will be called for each extracted term. 
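+
+        (Hypothetical illustration: for a template node such as
+        ``<span title="Details">Save</span>`` on line 7, both "Save" and
+        "Details" would be passed to ``callback`` with source_line 7.)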
+ """ + # not using elementTree.iterparse because we need to skip sub-trees in case + # the ancestor element had a reason to be skipped + for el in element: + if isinstance(el, SKIPPED_ELEMENT_TYPES): continue + if (el.tag.lower() not in SKIPPED_ELEMENTS + and "t-js" not in el.attrib + and not ("t-jquery" in el.attrib and "t-operation" not in el.attrib) + and el.get("t-translation", '').strip() != "off"): + _push(callback, el.text, el.sourceline) + for att in ('title', 'alt', 'label', 'placeholder'): + if att in el.attrib: + _push(callback, el.attrib[att], el.sourceline) + _extract_translatable_qweb_terms(el, callback) + _push(callback, el.tail, el.sourceline) + +def babel_extract_qweb(fileobj, keywords, comment_tags, options): + """Babel message extractor for qweb template files. + + :param fileobj: the file-like object the messages should be extracted from + :param keywords: a list of keywords (i.e. function names) that should + be recognized as translation functions + :param comment_tags: a list of translator tags to search for and + include in the results + :param options: a dictionary of additional options (optional) + :return: an iterator over ``(lineno, funcname, message, comments)`` + tuples + :rtype: Iterable + """ + result = [] + def handle_text(text, lineno): + result.append((lineno, None, text, [])) + tree = etree.parse(fileobj) + _extract_translatable_qweb_terms(tree.getroot(), handle_text) + return result + def trans_generate(lang, modules, cr): - logger = logging.getLogger('i18n') dbname = cr.dbname - pool = pooler.get_pool(dbname) - trans_obj = pool.get('ir.translation') - model_data_obj = pool.get('ir.model.data') + registry = openerp.registry(dbname) + trans_obj = registry.get('ir.translation') + model_data_obj = registry.get('ir.model.data') uid = 1 - l = pool.models.items() + l = registry.models.items() l.sort() query = 'SELECT name, model, res_id, module' \ @@ -560,9 +662,13 @@ def trans_generate(lang, modules, cr): cr.execute(query, query_param) _to_translate = [] - def push_translation(module, type, name, id, source): - tuple = (module, source, name, id, type) - if source and tuple not in _to_translate: + def push_translation(module, type, name, id, source, comments=None): + tuple = (module, source, name, id, type, comments or []) + # empty and one-letter terms are ignored, they probably are not meant to be + # translated, and would be very hard to translate anyway. 
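+        # (illustration: a lone ':' or '%' would be dropped by this check,
+        # and the membership test below then filters out exact duplicates)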
+ if not source or len(source.strip()) <= 1: + return + if tuple not in _to_translate: _to_translate.append(tuple) def encode(s): @@ -570,78 +676,59 @@ def trans_generate(lang, modules, cr): return s.encode('utf8') return s + def push(mod, type, name, res_id, term): + term = (term or '').strip() + if len(term) > 2: + push_translation(mod, type, name, res_id, term) + + def get_root_view(xml_id): + view = model_data_obj.xmlid_to_object(cr, uid, xml_id) + if view: + while view.mode != 'primary': + view = view.inherit_id + xml_id = view.get_external_id(cr, uid).get(view.id, xml_id) + return xml_id + for (xml_name,model,res_id,module) in cr.fetchall(): module = encode(module) model = encode(model) xml_name = "%s.%s" % (module, encode(xml_name)) - if not pool.get(model): - logger.error("Unable to find object %r", model) + if model not in registry: + _logger.error("Unable to find object %r", model) continue - exists = pool.get(model).exists(cr, uid, res_id) + if not registry[model]._translate: + # explicitly disabled + continue + + exists = registry[model].exists(cr, uid, res_id) if not exists: - logger.warning("Unable to find object %r with id %d", model, res_id) + _logger.warning("Unable to find object %r with id %d", model, res_id) continue - obj = pool.get(model).browse(cr, uid, res_id) + obj = registry[model].browse(cr, uid, res_id) if model=='ir.ui.view': d = etree.XML(encode(obj.arch)) - for t in trans_parse_view(d): - push_translation(module, 'view', encode(obj.model), 0, t) + if obj.type == 'qweb': + view_id = get_root_view(xml_name) + push_qweb = lambda t,l: push(module, 'view', 'website', view_id, t) + _extract_translatable_qweb_terms(d, push_qweb) + else: + push_view = lambda t,l: push(module, 'view', obj.model, xml_name, t) + trans_parse_view(d, push_view) elif model=='ir.actions.wizard': - service_name = 'wizard.'+encode(obj.wiz_name) - import openerp.netsvc as netsvc - if netsvc.Service._services.get(service_name): - obj2 = netsvc.Service._services[service_name] - for state_name, state_def in obj2.states.iteritems(): - if 'result' in state_def: - result = state_def['result'] - if result['type'] != 'form': - continue - name = "%s,%s" % (encode(obj.wiz_name), state_name) - - def_params = { - 'string': ('wizard_field', lambda s: [encode(s)]), - 'selection': ('selection', lambda s: [encode(e[1]) for e in ((not callable(s)) and s or [])]), - 'help': ('help', lambda s: [encode(s)]), - } - - # export fields - if not result.has_key('fields'): - logger.warning("res has no fields: %r", result) - continue - for field_name, field_def in result['fields'].iteritems(): - res_name = name + ',' + field_name - - for fn in def_params: - if fn in field_def: - transtype, modifier = def_params[fn] - for val in modifier(field_def[fn]): - push_translation(module, transtype, res_name, 0, val) - - # export arch - arch = result['arch'] - if arch and not isinstance(arch, UpdateableStr): - d = etree.XML(arch) - for t in trans_parse_view(d): - push_translation(module, 'wizard_view', name, 0, t) - - # export button labels - for but_args in result['state']: - button_name = but_args[0] - button_label = but_args[1] - res_name = name + ',' + button_name - push_translation(module, 'wizard_button', res_name, 0, button_label) + pass # TODO Can model really be 'ir.actions.wizard' ? 
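+            # (old-style ir.actions.wizard definitions were superseded by
+            # osv_memory wizards, so this branch should not match in practice)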
elif model=='ir.model.fields': try: field_name = encode(obj.name) except AttributeError, exc: - logger.error("name error in %s: %s", xml_name, str(exc)) + _logger.error("name error in %s: %s", xml_name, str(exc)) continue - objmodel = pool.get(obj.model) - if not objmodel or not field_name in objmodel._columns: + objmodel = registry.get(obj.model) + if (objmodel is None or field_name not in objmodel._columns + or not objmodel._translate): continue field_def = objmodel._columns[field_name] @@ -690,189 +777,164 @@ def trans_generate(lang, modules, cr): finally: report_file.close() except (IOError, etree.XMLSyntaxError): - logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname) + _logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname) - for field_name,field_def in obj._table._columns.items(): + for field_name, field_def in obj._columns.items(): + if model == 'ir.model' and field_name == 'name' and obj.name == obj.model: + # ignore model name if it is the technical one, nothing to translate + continue if field_def.translate: name = model + "," + field_name try: - trad = getattr(obj, field_name) or '' + term = obj[field_name] or '' except: - trad = '' - push_translation(module, 'model', name, xml_name, encode(trad)) + term = '' + push_translation(module, 'model', name, xml_name, encode(term)) # End of data for ir.model.data query results cr.execute(query_models, query_param) def push_constraint_msg(module, term_type, model, msg): - # Check presence of __call__ directly instead of using - # callable() because it will be deprecated as of Python 3.0 if not hasattr(msg, '__call__'): - push_translation(module, term_type, model, 0, encode(msg)) - - for (model_id, model, module) in cr.fetchall(): - module = encode(module) - model = encode(model) + push_translation(encode(module), term_type, encode(model), 0, encode(msg)) + + def push_local_constraints(module, model, cons_type='sql_constraints'): + """Climb up the class hierarchy and ignore inherited constraints + from other modules""" + term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint' + msg_pos = 2 if cons_type == 'sql_constraints' else 1 + for cls in model.__class__.__mro__: + if getattr(cls, '_module', None) != module: + continue + constraints = getattr(cls, '_local_' + cons_type, []) + for constraint in constraints: + push_constraint_msg(module, term_type, model._name, constraint[msg_pos]) + + for (_, model, module) in cr.fetchall(): + if model not in registry: + _logger.error("Unable to find object %r", model) + continue - model_obj = pool.get(model) + model_obj = registry[model] - if not model_obj: - logging.getLogger("i18n").error("Unable to find object %r", model) - continue + if model_obj._constraints: + push_local_constraints(module, model_obj, 'constraints') - for constraint in getattr(model_obj, '_constraints', []): - push_constraint_msg(module, 'constraint', model, constraint[1]) - - for constraint in getattr(model_obj, '_sql_constraints', []): - push_constraint_msg(module, 'sql_constraint', model, constraint[2]) - - # parse source code for _() calls - def get_module_from_path(path, mod_paths=None): - if not mod_paths: - # First, construct a list of possible paths - def_path = os.path.abspath(os.path.join(config.config['root_path'], 'addons')) # default addons path (base) - ad_paths= map(lambda m: os.path.abspath(m.strip()),config.config['addons_path'].split(',')) - mod_paths=[def_path] - for adp in ad_paths: - mod_paths.append(adp) 
- if not os.path.isabs(adp): - mod_paths.append(adp) - elif adp.startswith(def_path): - mod_paths.append(adp[len(def_path)+1:]) - for mp in mod_paths: - if path.startswith(mp) and (os.path.dirname(path) != mp): - path = path[len(mp)+1:] - return path.split(os.path.sep)[0] - return 'base' # files that are not in a module are considered as being in 'base' module + if model_obj._sql_constraints: + push_local_constraints(module, model_obj, 'sql_constraints') - modobj = pool.get('ir.module.module') + modobj = registry['ir.module.module'] installed_modids = modobj.search(cr, uid, [('state', '=', 'installed')]) installed_modules = map(lambda m: m['name'], modobj.read(cr, uid, installed_modids, ['name'])) - root_path = os.path.join(config.config['root_path'], 'addons') - - apaths = map(os.path.abspath, map(str.strip, config.config['addons_path'].split(','))) - if root_path in apaths: - path_list = apaths - else : - path_list = [root_path,] + apaths - + path_list = list(openerp.modules.module.ad_paths) # Also scan these non-addon paths for bin_path in ['osv', 'report' ]: path_list.append(os.path.join(config.config['root_path'], bin_path)) - logger.debug("Scanning modules at paths: ", path_list) + _logger.debug("Scanning modules at paths: %s", path_list) - mod_paths = [] - join_dquotes = re.compile(r'([^\\])"[\s\\]*"', re.DOTALL) - join_quotes = re.compile(r'([^\\])\'[\s\\]*\'', re.DOTALL) - re_dquotes = re.compile(r'[^a-zA-Z0-9_]_\([\s]*"(.+?)"[\s]*?\)', re.DOTALL) - re_quotes = re.compile(r'[^a-zA-Z0-9_]_\([\s]*\'(.+?)\'[\s]*?\)', re.DOTALL) + mod_paths = list(path_list) - def export_code_terms_from_file(fname, path, root, terms_type): + def get_module_from_path(path): + for mp in mod_paths: + if path.startswith(mp) and (os.path.dirname(path) != mp): + path = path[len(mp)+1:] + return path.split(os.path.sep)[0] + return 'base' # files that are not in a module are considered as being in 'base' module + + def verified_module_filepaths(fname, path, root): fabsolutepath = join(root, fname) frelativepath = fabsolutepath[len(path):] - module = get_module_from_path(fabsolutepath, mod_paths=mod_paths) - is_mod_installed = module in installed_modules - if (('all' in modules) or (module in modules)) and is_mod_installed: - logger.debug("Scanning code of %s at module: %s", frelativepath, module) - src_file = misc.file_open(fabsolutepath, subdir='') + display_path = "addons%s" % frelativepath + module = get_module_from_path(fabsolutepath) + if ('all' in modules or module in modules) and module in installed_modules: + return module, fabsolutepath, frelativepath, display_path + return None, None, None, None + + def babel_extract_terms(fname, path, root, extract_method="python", trans_type='code', + extra_comments=None, extract_keywords={'_': None}): + module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root) + extra_comments = extra_comments or [] + if module: + src_file = open(fabsolutepath, 'r') try: - code_string = src_file.read() + for extracted in extract.extract(extract_method, src_file, + keywords=extract_keywords): + # Babel 0.9.6 yields lineno, message, comments + # Babel 1.3 yields lineno, message, comments, context + lineno, message, comments = extracted[:3] + push_translation(module, trans_type, display_path, lineno, + encode(message), comments + extra_comments) + except Exception: + _logger.exception("Failed to extract terms from %s", fabsolutepath) finally: src_file.close() - if module in installed_modules: - frelativepath = str("addons" + frelativepath) - ite = 
re_dquotes.finditer(code_string) - code_offset = 0 - code_line = 1 - for i in ite: - src = i.group(1) - if src.startswith('""'): - assert src.endswith('""'), "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s" % (frelativepath, src[:30]) - src = src[2:-2] - else: - src = join_dquotes.sub(r'\1', src) - # try to count the lines from the last pos to our place: - code_line += code_string[code_offset:i.start(1)].count('\n') - # now, since we did a binary read of a python source file, we - # have to expand pythonic escapes like the interpreter does. - src = src.decode('string_escape') - push_translation(module, terms_type, frelativepath, code_line, encode(src)) - code_line += i.group(1).count('\n') - code_offset = i.end() # we have counted newlines up to the match end - - ite = re_quotes.finditer(code_string) - code_offset = 0 #reset counters - code_line = 1 - for i in ite: - src = i.group(1) - if src.startswith("''"): - assert src.endswith("''"), "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s" % (frelativepath, src[:30]) - src = src[2:-2] - else: - src = join_quotes.sub(r'\1', src) - code_line += code_string[code_offset:i.start(1)].count('\n') - src = src.decode('string_escape') - push_translation(module, terms_type, frelativepath, code_line, encode(src)) - code_line += i.group(1).count('\n') - code_offset = i.end() # we have counted newlines up to the match end for path in path_list: - logger.debug("Scanning files of modules at %s", path) + _logger.debug("Scanning files of modules at %s", path) for root, dummy, files in osutil.walksymlinks(path): - for fname in itertools.chain(fnmatch.filter(files, '*.py')): - export_code_terms_from_file(fname, path, root, 'code') - for fname in itertools.chain(fnmatch.filter(files, '*.mako')): - export_code_terms_from_file(fname, path, root, 'report') - - - out = [["module","type","name","res_id","src","value"]] # header + for fname in fnmatch.filter(files, '*.py'): + babel_extract_terms(fname, path, root) + # mako provides a babel extractor: http://docs.makotemplates.org/en/latest/usage.html#babel + for fname in fnmatch.filter(files, '*.mako'): + babel_extract_terms(fname, path, root, 'mako', trans_type='report') + # Javascript source files in the static/src/js directory, rest is ignored (libs) + if fnmatch.fnmatch(root, '*/static/src/js*'): + for fname in fnmatch.filter(files, '*.js'): + babel_extract_terms(fname, path, root, 'javascript', + extra_comments=[WEB_TRANSLATION_COMMENT], + extract_keywords={'_t': None, '_lt': None}) + # QWeb template files + if fnmatch.fnmatch(root, '*/static/src/xml*'): + for fname in fnmatch.filter(files, '*.xml'): + babel_extract_terms(fname, path, root, 'openerp.tools.translate:babel_extract_qweb', + extra_comments=[WEB_TRANSLATION_COMMENT]) + + out = [] _to_translate.sort() # translate strings marked as to be translated - for module, source, name, id, type in _to_translate: - trans = trans_obj._get_source(cr, uid, name, type, lang, source) - out.append([module, type, name, id, source, encode(trans) or '']) - + for module, source, name, id, type, comments in _to_translate: + trans = '' if not lang else trans_obj._get_source(cr, uid, name, type, lang, source) + out.append([module, type, name, id, source, encode(trans) or '', comments]) return out -def trans_load(cr, filename, lang, verbose=True, context=None): - logger = logging.getLogger('i18n') +def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None): try: - 
fileobj = open(filename,'r') - logger.info("loading %s", filename) + fileobj = misc.file_open(filename) + _logger.info("loading %s", filename) fileformat = os.path.splitext(filename)[-1][1:].lower() - r = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, context=context) + result = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, module_name=module_name, context=context) fileobj.close() - return r + return result except IOError: if verbose: - logger.error("couldn't read translation file %s", filename) + _logger.error("couldn't read translation file %s", filename) return None -def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, context=None): - """Populates the ir_translation table. Fixing the res_ids so that they point - correctly to ir_model_data is done in a separate step, using the - 'trans_update_res_ids' function below.""" - logger = logging.getLogger('i18n') +def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None): + """Populates the ir_translation table.""" if verbose: - logger.info('loading translation file for language %s', lang) + _logger.info('loading translation file for language %s', lang) if context is None: context = {} db_name = cr.dbname - pool = pooler.get_pool(db_name) - lang_obj = pool.get('res.lang') - trans_obj = pool.get('ir.translation') + registry = openerp.registry(db_name) + lang_obj = registry.get('res.lang') + trans_obj = registry.get('ir.translation') iso_lang = misc.get_iso_codes(lang) try: - uid = 1 - ids = lang_obj.search(cr, uid, [('code','=', lang)]) + ids = lang_obj.search(cr, SUPERUSER_ID, [('code','=', lang)]) if not ids: # lets create the language with locale information - lang_obj.load_lang(cr, 1, lang=lang, lang_name=lang_name) + lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name) + # Parse also the POT: it will possibly provide additional targets. + # (Because the POT comments are correct on Launchpad but not the + # PO comments due to a Launchpad limitation. See LP bug 933496.) + pot_reader = [] # now, the serious things: we read the language file fileobj.seek(0) @@ -884,65 +946,105 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, break elif fileformat == 'po': reader = TinyPoFile(fileobj) - f = ['type', 'name', 'res_id', 'src', 'value'] + f = ['type', 'name', 'res_id', 'src', 'value', 'comments'] + + # Make a reader for the POT file and be somewhat defensive for the + # stable branch. + if fileobj.name.endswith('.po'): + try: + # Normally the path looks like /path/to/xxx/i18n/lang.po + # and we try to find the corresponding + # /path/to/xxx/i18n/xxx.pot file. + head, _ = os.path.split(fileobj.name) + head2, _ = os.path.split(head) + head3, tail3 = os.path.split(head2) + pot_handle = misc.file_open(os.path.join(head3, tail3, 'i18n', tail3 + '.pot')) + pot_reader = TinyPoFile(pot_handle) + except: + pass + else: - logger.error('Bad file format: %s', fileformat) + _logger.error('Bad file format: %s', fileformat) raise Exception(_('Bad file format')) + # Read the POT `reference` comments, and keep them indexed by source + # string. 
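+        # Maps each source term to its POT reference targets, e.g. (hypothetical):
+        #   {'Invoices': {'value': None,
+        #                 'targets': [('model', 'ir.ui.menu,name', 'account.menu_invoices')]}}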
+ pot_targets = {} + for type, name, res_id, src, _, comments in pot_reader: + if type is not None: + pot_targets.setdefault(src, {'value': None, 'targets': []}) + pot_targets[src]['targets'].append((type, name, res_id)) + # read the rest of the file - line = 1 - irt_cursor = trans_obj._get_import_cursor(cr, uid, context=context) + irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context) - for row in reader: - line += 1 + def process_row(row): + """Process a single PO (or POT) entry.""" # skip empty rows and rows where the translation field (=last fiefd) is empty #if (not row) or (not row[-1]): - # continue + # return # dictionary which holds values for this line of the csv file # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ..., - # 'src': ..., 'value': ...} - dic = {'lang': lang} - dic_module = False - for i in range(len(f)): - if f[i] in ('module',): - continue - dic[f[i]] = row[i] + # 'src': ..., 'value': ..., 'module':...} + dic = dict.fromkeys(('name', 'res_id', 'src', 'type', 'imd_model', 'imd_name', 'module', 'value', 'comments')) + dic['lang'] = lang + for i, field in enumerate(f): + dic[field] = row[i] + + # Get the `reference` comments from the POT. + src = row[3] + if pot_reader and src in pot_targets: + pot_targets[src]['targets'] = filter(lambda x: x != row[:3], pot_targets[src]['targets']) + pot_targets[src]['value'] = row[4] + if not pot_targets[src]['targets']: + del pot_targets[src] # This would skip terms that fail to specify a res_id - if not dic.get('res_id', False): - continue + if not dic.get('res_id'): + return res_id = dic.pop('res_id') if res_id and isinstance(res_id, (int, long)) \ or (isinstance(res_id, basestring) and res_id.isdigit()): dic['res_id'] = int(res_id) + dic['module'] = module_name else: - try: - tmodel = dic['name'].split(',')[0] - if '.' in res_id: - tmodule, tname = res_id.split('.', 1) - else: - tmodule = dic_module - tname = res_id - dic['imd_model'] = tmodel - dic['imd_module'] = tmodule - dic['imd_name'] = tname - - dic['res_id'] = None - except Exception: - logger.warning("Could not decode resource for %s, please fix the po file.", - dic['res_id'], exc_info=True) - dic['res_id'] = None + tmodel = dic['name'].split(',')[0] + if '.' in res_id: + tmodule, tname = res_id.split('.', 1) + else: + tmodule = False + tname = res_id + dic['imd_model'] = tmodel + dic['imd_name'] = tname + dic['module'] = tmodule + dic['res_id'] = None irt_cursor.push(dic) + # First process the entries from the PO file (doing so also fills/removes + # the entries from the POT file). + for row in reader: + process_row(row) + + # Then process the entries implied by the POT file (which is more + # correct w.r.t. the targets) if some of them remain. 
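+        # (targets are left over when the PO reference comments diverged from
+        # the POT ones, as with the Launchpad rewriting mentioned above)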
+        pot_rows = []
+        for src in pot_targets:
+            value = pot_targets[src]['value']
+            for type, name, res_id in pot_targets[src]['targets']:
+                # (comments from the POT are not tracked per source, so none
+                # are attached to these synthesized rows)
+                pot_rows.append((type, name, res_id, src, value, ''))
+        for row in pot_rows:
+            process_row(row)
+
         irt_cursor.finish()
+        trans_obj.clear_caches()
         if verbose:
-            logger.info("translation file loaded succesfully")
+            _logger.info("translation file loaded successfully")
     except IOError:
         filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
-        logger.exception("couldn't read translation file %s", filename)
+        _logger.exception("couldn't read translation file %s", filename)
 
 def get_locales(lang=None):
     if lang is None:
@@ -991,11 +1093,10 @@ def load_language(cr, lang):
     :param lang: language ISO code with optional _underscore_ and l10n flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
     :type lang: str
     """
-    pool = pooler.get_pool(cr.dbname)
-    language_installer = pool.get('base.language.install')
-    uid = 1
-    oid = language_installer.create(cr, uid, {'lang': lang})
-    language_installer.lang_install(cr, uid, [oid], context=None)
+    registry = openerp.registry(cr.dbname)
+    language_installer = registry['base.language.install']
+    oid = language_installer.create(cr, SUPERUSER_ID, {'lang': lang})
+    language_installer.lang_install(cr, SUPERUSER_ID, [oid], context=None)
 
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
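
For reference, a PO entry using the comment layout that TinyPoFile reads and
writes would look like the following (a hypothetical entry: the module name,
path, line number and xml ids are made up; each `#:` reference is
`type:name:res_id` as split by `split(':', 2)`, `#. module:` lines are kept
out of the comments, and `openerp-web` is WEB_TRANSLATION_COMMENT):

    #. module: sale
    #. openerp-web
    #: code:addons/sale/static/src/js/widgets.js:42
    #: model:ir.ui.menu,name:sale.menu_quotations
    msgid "Quotations"
    msgstr "Devis"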