import config
import misc
-from misc import UpdateableStr
from misc import SKIPPED_ELEMENT_TYPES
import osutil
import openerp
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
+SKIPPED_ELEMENTS = ('script', 'style')
+
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
- 'bs_BA': 'Serbian (Latin)',
+ 'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
def _get_lang(self, frame):
# try, in order: context.get('lang'), kwargs['context'].get('lang'),
# self.env.lang, self.localcontext.get('lang')
- if 'context' in frame.f_locals:
- return frame.f_locals['context'].get('lang')
- kwargs = frame.f_locals.get('kwargs', {})
- if 'context' in kwargs:
- return kwargs['context'].get('lang')
- s = frame.f_locals.get('self')
- if hasattr(s, 'env'):
- return s.env.lang
- if hasattr(s, 'localcontext'):
- return s.localcontext.get('lang')
- # Last resort: attempt to guess the language of the user
- # Pitfall: some operations are performed in sudo mode, and we
- # don't know the originial uid, so the language may
- # be wrong when the admin language differs.
- pool = getattr(s, 'pool', None)
- (cr, dummy) = self._get_cr(frame, allow_create=False)
- uid = self._get_uid(frame)
- if pool and cr and uid:
- return pool['res.users'].context_get(cr, uid)['lang']
- return None
+ lang = None
+ if frame.f_locals.get('context'):
+ lang = frame.f_locals['context'].get('lang')
+ if not lang:
+ kwargs = frame.f_locals.get('kwargs', {})
+ if kwargs.get('context'):
+ lang = kwargs['context'].get('lang')
+ if not lang:
+ s = frame.f_locals.get('self')
+ if hasattr(s, 'env'):
+ lang = s.env.lang
+ if not lang:
+ if hasattr(s, 'localcontext'):
+ lang = s.localcontext.get('lang')
+ if not lang:
+ # Last resort: attempt to guess the language of the user
+ # Pitfall: some operations are performed in sudo mode, and we
+ # don't know the originial uid, so the language may
+ # be wrong when the admin language differs.
+ pool = getattr(s, 'pool', None)
+ (cr, dummy) = self._get_cr(frame, allow_create=False)
+ uid = self._get_uid(frame)
+ if pool and cr and uid:
+ lang = pool['res.users'].context_get(cr, uid)['lang']
+ return lang
def __call__(self, source):
res = source
if not line.startswith('module:'):
comments.append(line)
elif line.startswith('#:'):
+ # Process the `reference` comments. Each line can specify
+ # multiple targets (e.g. model, view, code, selection,
+ # ...). For each target, we will return an additional
+ # entry.
for lpart in line[2:].strip().split(' '):
trans_info = lpart.strip().split(':',2)
if trans_info and len(trans_info) == 2:
line = self.lines.pop(0).strip()
if targets and not fuzzy:
+ # Use the first target for the current entry (returned at the
+ # end of this next() call), and keep the others to generate
+            # additional entries (returned by the next next() calls).
trans_type, name, res_id = targets.pop(0)
for t, n, r in targets:
if t == trans_type == 'code': continue
raise Exception(_('Unrecognized extension: must be one of '
'.csv, .po, or .tgz (received .%s).' % format))
- trans_lang = lang
- if not trans_lang and format == 'csv':
- # CSV files are meant for translators and they need a starting point,
- # so we at least put the original term in the translation column
- trans_lang = 'en_US'
translations = trans_generate(lang, modules, cr)
- modules = set([t[0] for t in translations[1:]])
+ modules = set(t[0] for t in translations)
_process(format, modules, translations, buffer, lang)
del translations
res.extend(trans_parse_rml(n))
return res
-def trans_parse_view(de):
- res = []
- if not isinstance(de, SKIPPED_ELEMENT_TYPES) and de.text and de.text.strip():
- res.append(de.text.strip().encode("utf8"))
- if de.tail and de.tail.strip():
- res.append(de.tail.strip().encode("utf8"))
- if de.tag == 'attribute' and de.get("name") == 'string':
- if de.text:
- res.append(de.text.encode("utf8"))
- if de.get("string"):
- res.append(de.get('string').encode("utf8"))
- if de.get("help"):
- res.append(de.get('help').encode("utf8"))
- if de.get("sum"):
- res.append(de.get('sum').encode("utf8"))
- if de.get("confirm"):
- res.append(de.get('confirm').encode("utf8"))
- if de.get("placeholder"):
- res.append(de.get('placeholder').encode("utf8"))
- for n in de:
- res.extend(trans_parse_view(n))
- return res
+def _push(callback, term, source_line):
+ """ Sanity check before pushing translation terms """
+ term = (term or "").strip().encode('utf8')
+ # Avoid non-char tokens like ':' '...' '.00' etc.
+ if len(term) > 8 or any(x.isalpha() for x in term):
+ callback(term, source_line)
+
def trans_parse_view(element, callback):
    """ Helper method to recursively walk an etree document representing a
    regular view and call ``callback(term, source_line)`` for each
    translatable term that is found in the document.

    :param ElementTree element: root of etree document to extract terms from
    :param callable callback: a callable in the form ``f(term, source_line)``,
        that will be called for each extracted term.
    """
    translatable_attrs = ('string', 'help', 'sum', 'confirm', 'placeholder')
    for node in element.iter():
        # text is only translatable outside <script>/<style> and non-element
        # nodes; the tail and attributes are pushed unconditionally, as the
        # tail text lives outside the (possibly skipped) node itself
        skipped = (isinstance(node, SKIPPED_ELEMENT_TYPES)
                   or node.tag.lower() in SKIPPED_ELEMENTS)
        if not skipped and node.text:
            _push(callback, node.text, node.sourceline)
        if node.tail:
            _push(callback, node.tail, node.sourceline)
        for attr_name in translatable_attrs:
            attr_value = node.get(attr_name)
            if attr_value:
                _push(callback, attr_value, node.sourceline)
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
module = module_dict.get(module, module)
return module in modules
def _extract_translatable_qweb_terms(element, callback):
    """ Helper method to walk an etree document representing
    a QWeb template, and call ``callback(term, source_line)`` for each
    translatable term that is found in the document.

    :param etree._Element element: root of etree document to extract terms from
    :param Callable callback: a callable in the form ``f(term, source_line)``,
        that will be called for each extracted term.
    """
    # not using elementTree.iterparse because we need to skip sub-trees in case
    # the ancestor element had a reason to be skipped
    for child in element:
        if isinstance(child, SKIPPED_ELEMENT_TYPES):
            continue
        # a node's content is skipped when it is a <script>/<style> tag,
        # is evaluated as javascript (t-js), is a raw t-jquery node (no
        # t-operation), or explicitly opts out with t-translation="off";
        # its tail text is pushed in every case, as it lives outside the node
        attrs = child.attrib
        js_only = "t-js" in attrs
        raw_jquery = "t-jquery" in attrs and "t-operation" not in attrs
        opted_out = child.get("t-translation", '').strip() == "off"
        if (child.tag.lower() not in SKIPPED_ELEMENTS
                and not js_only
                and not raw_jquery
                and not opted_out):
            _push(callback, child.text, child.sourceline)
            for attr_name in ('title', 'alt', 'label', 'placeholder'):
                if attr_name in attrs:
                    _push(callback, attrs[attr_name], child.sourceline)
            _extract_translatable_qweb_terms(child, callback)
        _push(callback, child.tail, child.sourceline)
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
    """Babel message extractor for qweb template files.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should
                     be recognized as translation functions
    :param comment_tags: a list of translator tags to search for and
                         include in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples
    :rtype: Iterable
    """
    messages = []

    def collect(term, lineno):
        # funcname and comments are not applicable to qweb terms
        messages.append((lineno, None, term, []))

    root = etree.parse(fileobj).getroot()
    _extract_translatable_qweb_terms(root, collect)
    return messages
-
def trans_generate(lang, modules, cr):
dbname = cr.dbname
# empty and one-letter terms are ignored, they probably are not meant to be
# translated, and would be very hard to translate anyway.
if not source or len(source.strip()) <= 1:
- _logger.debug("Ignoring empty or 1-letter source term: %r", tuple)
return
if tuple not in _to_translate:
_to_translate.append(tuple)
return s.encode('utf8')
return s
+ def push(mod, type, name, res_id, term):
+ term = (term or '').strip()
+ if len(term) > 2:
+ push_translation(mod, type, name, res_id, term)
+
+ def get_root_view(xml_id):
+ view = model_data_obj.xmlid_to_object(cr, uid, xml_id)
+ if view:
+ while view.mode != 'primary':
+ view = view.inherit_id
+ xml_id = view.get_external_id(cr, uid).get(view.id, xml_id)
+ return xml_id
+
for (xml_name,model,res_id,module) in cr.fetchall():
module = encode(module)
model = encode(model)
_logger.error("Unable to find object %r", model)
continue
+ if not registry[model]._translate:
+ # explicitly disabled
+ continue
+
exists = registry[model].exists(cr, uid, res_id)
if not exists:
_logger.warning("Unable to find object %r with id %d", model, res_id)
if model=='ir.ui.view':
d = etree.XML(encode(obj.arch))
- for t in trans_parse_view(d):
- push_translation(module, 'view', encode(obj.model), 0, t)
+ if obj.type == 'qweb':
+ view_id = get_root_view(xml_name)
+ push_qweb = lambda t,l: push(module, 'view', 'website', view_id, t)
+ _extract_translatable_qweb_terms(d, push_qweb)
+ else:
+ push_view = lambda t,l: push(module, 'view', obj.model, xml_name, t)
+ trans_parse_view(d, push_view)
elif model=='ir.actions.wizard':
pass # TODO Can model really be 'ir.actions.wizard' ?
_logger.error("name error in %s: %s", xml_name, str(exc))
continue
objmodel = registry.get(obj.model)
- if objmodel is None or field_name not in objmodel._columns:
+ if (objmodel is None or field_name not in objmodel._columns
+ or not objmodel._translate):
continue
field_def = objmodel._columns[field_name]
_logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
for field_name, field_def in obj._columns.items():
+ if model == 'ir.model' and field_name == 'name' and obj.name == obj.model:
+ # ignore model name if it is the technical one, nothing to translate
+ continue
if field_def.translate:
name = model + "," + field_name
try:
- trad = getattr(obj, field_name) or ''
+ term = obj[field_name] or ''
except:
- trad = ''
- push_translation(module, 'model', name, xml_name, encode(trad))
+ term = ''
+ push_translation(module, 'model', name, xml_name, encode(term))
# End of data for ir.model.data query results
if model_obj._sql_constraints:
push_local_constraints(module, model_obj, 'sql_constraints')
-
modobj = registry['ir.module.module']
installed_modids = modobj.search(cr, uid, [('state', '=', 'installed')])
installed_modules = map(lambda m: m['name'], modobj.read(cr, uid, installed_modids, ['name']))
for bin_path in ['osv', 'report' ]:
path_list.append(os.path.join(config.config['root_path'], bin_path))
- _logger.debug("Scanning modules at paths: ", path_list)
+ _logger.debug("Scanning modules at paths: %s", path_list)
mod_paths = list(path_list)
# lets create the language with locale information
lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)
+ # Parse also the POT: it will possibly provide additional targets.
+ # (Because the POT comments are correct on Launchpad but not the
+ # PO comments due to a Launchpad limitation. See LP bug 933496.)
+ pot_reader = []
# now, the serious things: we read the language file
fileobj.seek(0)
elif fileformat == 'po':
reader = TinyPoFile(fileobj)
f = ['type', 'name', 'res_id', 'src', 'value', 'comments']
+
+ # Make a reader for the POT file and be somewhat defensive for the
+ # stable branch.
+ if fileobj.name.endswith('.po'):
+ try:
+ # Normally the path looks like /path/to/xxx/i18n/lang.po
+ # and we try to find the corresponding
+ # /path/to/xxx/i18n/xxx.pot file.
+ head, _ = os.path.split(fileobj.name)
+ head2, _ = os.path.split(head)
+ head3, tail3 = os.path.split(head2)
+ pot_handle = misc.file_open(os.path.join(head3, tail3, 'i18n', tail3 + '.pot'))
+ pot_reader = TinyPoFile(pot_handle)
+ except:
+ pass
+
else:
_logger.error('Bad file format: %s', fileformat)
raise Exception(_('Bad file format'))
+ # Read the POT `reference` comments, and keep them indexed by source
+ # string.
+ pot_targets = {}
+ for type, name, res_id, src, _, comments in pot_reader:
+ if type is not None:
+ pot_targets.setdefault(src, {'value': None, 'targets': []})
+ pot_targets[src]['targets'].append((type, name, res_id))
+
# read the rest of the file
- line = 1
irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)
- for row in reader:
- line += 1
+ def process_row(row):
+ """Process a single PO (or POT) entry."""
        # skip empty rows and rows where the translation field (=last field) is empty
#if (not row) or (not row[-1]):
- # continue
+ # return
# dictionary which holds values for this line of the csv file
# {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
for i, field in enumerate(f):
dic[field] = row[i]
+ # Get the `reference` comments from the POT.
+ src = row[3]
+ if pot_reader and src in pot_targets:
+ pot_targets[src]['targets'] = filter(lambda x: x != row[:3], pot_targets[src]['targets'])
+ pot_targets[src]['value'] = row[4]
+ if not pot_targets[src]['targets']:
+ del pot_targets[src]
+
# This would skip terms that fail to specify a res_id
if not dic.get('res_id'):
- continue
+ return
res_id = dic.pop('res_id')
if res_id and isinstance(res_id, (int, long)) \
irt_cursor.push(dic)
+ # First process the entries from the PO file (doing so also fills/removes
+ # the entries from the POT file).
+ for row in reader:
+ process_row(row)
+
+ # Then process the entries implied by the POT file (which is more
+ # correct w.r.t. the targets) if some of them remain.
+ pot_rows = []
+ for src in pot_targets:
+ value = pot_targets[src]['value']
+ for type, name, res_id in pot_targets[src]['targets']:
+ pot_rows.append((type, name, res_id, src, value, comments))
+ for row in pot_rows:
+ process_row(row)
+
irt_cursor.finish()
trans_obj.clear_caches()
if verbose: