'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
- 'bs_BA': 'Serbian (Latin)',
+ 'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
def _get_lang(self, frame):
# try, in order: context.get('lang'), kwargs['context'].get('lang'),
# self.env.lang, self.localcontext.get('lang')
- if 'context' in frame.f_locals:
- return frame.f_locals['context'].get('lang')
- kwargs = frame.f_locals.get('kwargs', {})
- if 'context' in kwargs:
- return kwargs['context'].get('lang')
- s = frame.f_locals.get('self')
- if hasattr(s, 'env'):
- return s.env.lang
- if hasattr(s, 'localcontext'):
- return s.localcontext.get('lang')
- # Last resort: attempt to guess the language of the user
- # Pitfall: some operations are performed in sudo mode, and we
- # don't know the originial uid, so the language may
- # be wrong when the admin language differs.
- pool = getattr(s, 'pool', None)
- (cr, dummy) = self._get_cr(frame, allow_create=False)
- uid = self._get_uid(frame)
- if pool and cr and uid:
- return pool['res.users'].context_get(cr, uid)['lang']
- return None
+ lang = None
+ if frame.f_locals.get('context'):
+ lang = frame.f_locals['context'].get('lang')
+ if not lang:
+ kwargs = frame.f_locals.get('kwargs', {})
+ if kwargs.get('context'):
+ lang = kwargs['context'].get('lang')
+ if not lang:
+ s = frame.f_locals.get('self')
+ if hasattr(s, 'env'):
+ lang = s.env.lang
+ if not lang:
+ if hasattr(s, 'localcontext'):
+ lang = s.localcontext.get('lang')
+ if not lang:
+ # Last resort: attempt to guess the language of the user
+ # Pitfall: some operations are performed in sudo mode, and we
+ # don't know the original uid, so the language may
+ # be wrong when the admin language differs.
+ pool = getattr(s, 'pool', None)
+ (cr, dummy) = self._get_cr(frame, allow_create=False)
+ uid = self._get_uid(frame)
+ if pool and cr and uid:
+ lang = pool['res.users'].context_get(cr, uid)['lang']
+ return lang
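
For illustration, a minimal sketch of what the rewrite changes (the classes below are made-up stand-ins, not Odoo objects): under the old early returns, a frame-local context without a 'lang' key ended the lookup, whereas the new chain keeps falling through to the next source.

class _Env(object):
    lang = 'de_DE'

class _Record(object):
    env = _Env()

frame_locals = {'context': {'tz': 'UTC'}, 'self': _Record()}
# Old logic: 'context' is present in the frame, so the early return yields
#            frame_locals['context'].get('lang'), i.e. None.
# New logic: the missing 'lang' no longer short-circuits; the chain falls
#            through to self.env.lang and resolves 'de_DE'.
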
def __call__(self, source):
res = source
if not line.startswith('module:'):
comments.append(line)
elif line.startswith('#:'):
+ # Process the `reference` comments. Each line can specify
+ # multiple targets (e.g. model, view, code, selection,
+ # ...). For each target, we will return an additional
+ # entry.
for lpart in line[2:].strip().split(' '):
trans_info = lpart.strip().split(':',2)
if trans_info and len(trans_info) == 2:
line = self.lines.pop(0).strip()
if targets and not fuzzy:
+ # Use the first target for the current entry (returned at the
+ # end of this next() call), and keep the others to generate
+ # additional entries (returned by the next next() calls).
trans_type, name, res_id = targets.pop(0)
for t, n, r in targets:
if t == trans_type == 'code': continue
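
A sketch of what this buys when iterating a TinyPoFile (the PO content below is made up, references included): a source term referenced from both code and a view yields two entries.

from cStringIO import StringIO

po = StringIO('\n'.join([
    '#. module: foo',
    '#: code:addons/foo/models/foo.py:42',
    '#: view:foo.order:0',
    'msgid "Draft"',
    'msgstr "Brouillon"',
    '',
]))
rows = list(TinyPoFile(po))
# rows should contain, roughly:
#   ('code', 'addons/foo/models/foo.py', '42', 'Draft', 'Brouillon', '')
#   ('view', 'foo.order', '0', 'Draft', 'Brouillon', '')
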
:param callable callback: a callable in the form ``f(term, source_line)``,
that will be called for each extracted term.
"""
- if (not isinstance(element, SKIPPED_ELEMENT_TYPES)
- and element.tag.lower() not in SKIPPED_ELEMENTS
- and element.text):
- _push(callback, element.text, element.sourceline)
- if element.tail:
- _push(callback, element.tail, element.sourceline)
- for attr in ('string', 'help', 'sum', 'confirm', 'placeholder'):
- value = element.get(attr)
- if value:
- _push(callback, value, element.sourceline)
- for n in element:
- trans_parse_view(n, callback)
+ for el in element.iter():
+ if (not isinstance(el, SKIPPED_ELEMENT_TYPES)
+ and el.tag.lower() not in SKIPPED_ELEMENTS
+ and el.text):
+ _push(callback, el.text, el.sourceline)
+ if el.tail:
+ _push(callback, el.tail, el.sourceline)
+ for attr in ('string', 'help', 'sum', 'confirm', 'placeholder'):
+ value = el.get(attr)
+ if value:
+ _push(callback, value, el.sourceline)
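
A usage sketch (the view architecture below is made up): collect the translatable terms of a form view together with their source lines.

from lxml import etree

arch = etree.fromstring(
    '<form string="Partner">'
    '<field name="name" placeholder="Name..."/>'
    '<label string="Address"/>'
    '</form>')
terms = []
trans_parse_view(arch, lambda term, line: terms.append((term, line)))
# terms should now hold, roughly, [('Partner', 1), ('Name...', 1), ('Address', 1)]
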
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
a QWeb template, and call ``callback(term, source_line)`` for each
translatable term that is found in the document.
- :param ElementTree element: root of etree document to extract terms from
- :param callable callback: a callable in the form ``f(term, source_line)``,
- that will be called for each extracted term.
+ :param etree._Element element: root of etree document to extract terms from
+ :param Callable callback: a callable in the form ``f(term, source_line)``,
+ that will be called for each extracted term.
"""
# not using elementTree.iterparse because we need to skip sub-trees in case
# the ancestor element had a reason to be skipped
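
To make the remark about iterparse concrete, here is a minimal sketch (the helper and its predicate are assumptions, not the module's code) of why a recursive walk is preferred: skipping an element must also prune its whole sub-tree.

def _walk(el, callback, should_skip):
    # hypothetical predicate deciding whether an element is translatable
    if should_skip(el):
        return  # prunes el and all of its descendants in one step
    if el.text:
        callback(el.text, el.sourceline)
    for child in el:
        _walk(child, callback, should_skip)
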
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
"""Babel message extractor for qweb template files.
+
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should
be recognized as translation functions
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
- :rtype: ``iterator``
+ :rtype: Iterable
"""
result = []
def handle_text(text, lineno):
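
A usage sketch for this extractor (illustrative only; the template is made up and the exact terms returned depend on the QWeb parsing rules):

from cStringIO import StringIO

template = StringIO(
    '<templates>'
    '<t t-name="demo.hello"><p>Hello, welcome back!</p></t>'
    '</templates>')
messages = babel_extract_qweb(template, keywords=[], comment_tags=[], options={})
# expected to hold something like [(1, None, 'Hello, welcome back!', [])]
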
if model_obj._sql_constraints:
push_local_constraints(module, model_obj, 'sql_constraints')
-
modobj = registry['ir.module.module']
installed_modids = modobj.search(cr, uid, [('state', '=', 'installed')])
installed_modules = map(lambda m: m['name'], modobj.read(cr, uid, installed_modids, ['name']))
# let's create the language with locale information
lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)
+ # Also parse the POT: it may provide additional targets.
+ # (The POT comments are correct on Launchpad, but the PO comments are
+ # not, due to a Launchpad limitation. See LP bug 933496.)
+ pot_reader = []
# now, the serious things: we read the language file
fileobj.seek(0)
elif fileformat == 'po':
reader = TinyPoFile(fileobj)
f = ['type', 'name', 'res_id', 'src', 'value', 'comments']
+
+ # Make a reader for the POT file and be somewhat defensive for the
+ # stable branch.
+ if fileobj.name.endswith('.po'):
+ try:
+ # Normally the path looks like /path/to/xxx/i18n/lang.po
+ # and we try to find the corresponding
+ # /path/to/xxx/i18n/xxx.pot file.
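+ # For example (made-up path): for /srv/addons/sale/i18n/fr.po the
+ # successive splits give head = /srv/addons/sale/i18n,
+ # head2 = /srv/addons/sale, (head3, tail3) = ('/srv/addons', 'sale'),
+ # hence /srv/addons/sale/i18n/sale.pot.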
+ head, _ = os.path.split(fileobj.name)
+ head2, _ = os.path.split(head)
+ head3, tail3 = os.path.split(head2)
+ pot_handle = misc.file_open(os.path.join(head3, tail3, 'i18n', tail3 + '.pot'))
+ pot_reader = TinyPoFile(pot_handle)
+ except:
+ pass
+
else:
_logger.error('Bad file format: %s', fileformat)
raise Exception(_('Bad file format'))
+ # Read the POT `reference` comments, and keep them indexed by source
+ # string.
+ pot_targets = {}
+ for type, name, res_id, src, _, comments in pot_reader:
+ if type is not None:
+ pot_targets.setdefault(src, {'value': None, 'targets': []})
+ pot_targets[src]['targets'].append((type, name, res_id))
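+ # e.g. (made-up entry) pot_targets['Draft'] ==
+ #     {'value': None, 'targets': [('code', 'addons/foo/foo.py', '42'),
+ #                                 ('view', 'foo.order', '0')]}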
+
# read the rest of the file
- line = 1
irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)
- for row in reader:
- line += 1
+ def process_row(row):
+ """Process a single PO (or POT) entry."""
# skip empty rows and rows where the translation field (= last field) is empty
#if (not row) or (not row[-1]):
- # continue
+ # return
# dictionary which holds values for this line of the csv file
# {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
for i, field in enumerate(f):
dic[field] = row[i]
+ # Get the `reference` comments from the POT.
+ src = row[3]
+ if pot_reader and src in pot_targets:
+ pot_targets[src]['targets'] = filter(lambda x: x != row[:3], pot_targets[src]['targets'])
+ pot_targets[src]['value'] = row[4]
+ if not pot_targets[src]['targets']:
+ del pot_targets[src]
+
# This would skip terms that fail to specify a res_id
if not dic.get('res_id'):
- continue
+ return
res_id = dic.pop('res_id')
if res_id and isinstance(res_id, (int, long)) \
irt_cursor.push(dic)
+ # First process the entries from the PO file (doing so also fills/removes
+ # the entries from the POT file).
+ for row in reader:
+ process_row(row)
+
+ # Then process the entries implied by the POT file (which is more
+ # correct w.r.t. the targets) if some of them remain.
+ pot_rows = []
+ for src in pot_targets:
+ value = pot_targets[src]['value']
+ for type, name, res_id in pot_targets[src]['targets']:
+ pot_rows.append((type, name, res_id, src, value, comments))
+ for row in pot_rows:
+ process_row(row)
+
irt_cursor.finish()
trans_obj.clear_caches()
if verbose: