import itertools
import locale
import os
+import pooler
import re
import logging
import tarfile
import tempfile
+import threading
from os.path import join
-import logging
from datetime import datetime
from lxml import etree
-import tools, pooler
+import tools
import netsvc
from tools.misc import UpdateableStr
class GettextAlias(object):
+ def _get_db(self):
+ # find current DB based on thread/worker db name (see netsvc)
+ db_name = getattr(threading.currentThread(), 'dbname', None)
+ if db_name:
+ return pooler.get_db_only(db_name)
+
def _get_cr(self, frame):
is_new_cr = False
- cr = frame.f_locals.get('cr')
+ cr = frame.f_locals.get('cr', frame.f_locals.get('cursor'))
if not cr:
s = frame.f_locals.get('self', {})
- cr = getattr(s, 'cr', False)
- if not cr:
- if frame.f_globals.get('pooler', False):
- # TODO: we should probably get rid of the 'is_new_cr' case: no cr in locals -> no translation for you
- dbs = frame.f_globals['pooler'].pool_dic.keys()
- if len(dbs) == 1:
- cr = pooler.get_db(dbs[0]).cursor()
- is_new_cr = True
+ cr = getattr(s, 'cr', None)
+ if not cr:
+ db = self._get_db()
+ if db:
+ cr = db.cursor()
+ is_new_cr = True
return cr, is_new_cr
-
+
def _get_lang(self, frame):
- lang = frame.f_locals.get('context', {}).get('lang', False)
+ lang = None
+ ctx = frame.f_locals.get('context')
+ if not ctx:
+ kwargs = frame.f_locals.get('kwargs')
+ if kwargs is None:
+ args = frame.f_locals.get('args')
+ if args and isinstance(args, (list, tuple)) \
+ and isinstance(args[-1], dict):
+ ctx = args[-1]
+ elif isinstance(kwargs, dict):
+ ctx = kwargs.get('context')
+ if ctx:
+ lang = ctx.get('lang')
if not lang:
- args = frame.f_locals.get('args', False)
- if args:
- lang = args[-1].get('lang', False)
- if not lang:
- s = frame.f_locals.get('self', {})
- c = getattr(s, 'localcontext', {})
- lang = c.get('lang', False)
+ s = frame.f_locals.get('self', {})
+ c = getattr(s, 'localcontext', None)
+ if c:
+ lang = c.get('lang')
return lang
-
+
def __call__(self, source):
- is_new_cr = False
res = source
+ cr = None
+ is_new_cr = False
try:
- frame = inspect.stack()[1][0]
- cr, is_new_cr = self._get_cr(frame)
+ frame = inspect.currentframe()
+ if frame is None:
+ return source
+ frame = frame.f_back
+ if not frame:
+ return source
lang = self._get_lang(frame)
- if lang and cr:
- cr.execute('SELECT value FROM ir_translation WHERE lang=%s AND type IN (%s, %s) AND src=%s', (lang, 'code','sql_constraint', source))
- res_trans = cr.fetchone()
- res = res_trans and res_trans[0] or source
+ if lang:
+ cr, is_new_cr = self._get_cr(frame)
+ if cr:
+ # Try to use ir.translation to benefit from global cache if possible
+ pool = pooler.get_pool(cr.dbname)
+ res = pool.get('ir.translation')._get_source(cr, 1, None, ('code','sql_constraint'), lang, source)
+ else:
+ logger.debug('no context cursor detected, skipping translation for "%r"', source)
+ else:
logger.debug('no translation language detected, skipping translation for "%r"', source)
except Exception:
- logger.debug('translation went wrong for string %s', repr(source))
+ logger.debug('translation went wrong for "%r", skipped', source)
+ # if so, double-check the root/base translations filenames
finally:
- if is_new_cr:
+ if cr and is_new_cr:
cr.close()
return res
def quote(s):
"""Returns quoted PO term string, with special PO characters escaped"""
- assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines"
+ assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
return '"%s"' % s.replace('\\','\\\\') \
.replace('"','\\"') \
.replace('\n', '\\n"\n"')
self.logger = logging.getLogger('i18n')
self.buffer = buffer
- def warn(self, msg):
- self.logger.warning(msg)
+ def warn(self, msg, *args):
+ self.logger.warning(msg, *args)
def __iter__(self):
self.buffer.seek(0)
if self.tnrs:
type, name, res_id, source, trad = self.tnrs.pop(0)
+ if not res_id:
+ res_id = '0'
else:
tmp_tnrs = []
line = None
rows_by_module = {}
for row in rows:
module = row[0]
- rows_by_module.setdefault(module, []).append(row)
+ # first row is the "header", as in csv, it will be popped
+ rows_by_module.setdefault(module, [['module', 'type', 'name', 'res_id', 'src', ''],])
+ rows_by_module[module].append(row)
tmpdir = tempfile.mkdtemp()
for mod, modrows in rows_by_module.items():
def trans_parse_view(de):
res = []
+ if de.tag == 'attribute' and de.get("name") == 'string':
+ if de.text:
+ res.append(de.text.encode("utf8"))
if de.get("string"):
res.append(de.get('string').encode("utf8"))
if de.get("sum"):
res.append(de.get('sum').encode("utf8"))
+ if de.get("confirm"):
+ res.append(de.get('confirm').encode("utf8"))
for n in de:
res.extend(trans_parse_view(n))
return res
path_list = apaths
else :
path_list = [root_path,] + apaths
+
+ # Also scan these non-addon paths
+ for bin_path in ['osv', 'report' ]:
+ path_list.append(os.path.join(tools.config['root_path'], bin_path))
- logger.notifyChannel("i18n", netsvc.LOG_DEBUG, "Scanning modules at paths: %s" % (' '.join(path_list),))
+ logger.debug("Scanning modules at paths: %s", path_list)
mod_paths = []
join_dquotes = re.compile(r'([^\\])"[\s\\]*"', re.DOTALL)
module = get_module_from_path(fabsolutepath, mod_paths=mod_paths)
is_mod_installed = module in installed_modules
if (('all' in modules) or (module in modules)) and is_mod_installed:
- logger.notifyChannel("i18n", netsvc.LOG_DEBUG, "Scanning code of %s at module: %s" % (frelativepath, module))
+ logger.debug("Scanning code of %s at module: %s", frelativepath, module)
code_string = tools.file_open(fabsolutepath, subdir='').read()
if module in installed_modules:
frelativepath = str("addons" + frelativepath)
ite = re_dquotes.finditer(code_string)
+ code_offset = 0
+ code_line = 1
for i in ite:
src = i.group(1)
if src.startswith('""'):
src = src[2:-2]
else:
src = join_dquotes.sub(r'\1', src)
+ # try to count the lines from the last pos to our place:
+ code_line += code_string[code_offset:i.start(1)].count('\n')
# now, since we did a binary read of a python source file, we
# have to expand pythonic escapes like the interpreter does.
src = src.decode('string_escape')
- push_translation(module, terms_type, frelativepath, 0, encode(src))
+ push_translation(module, terms_type, frelativepath, code_line, encode(src))
+ code_line += i.group(1).count('\n')
+ code_offset = i.end() # we have counted newlines up to the match end
+
ite = re_quotes.finditer(code_string)
+ code_offset = 0 #reset counters
+ code_line = 1
for i in ite:
src = i.group(1)
if src.startswith("''"):
src = src[2:-2]
else:
src = join_quotes.sub(r'\1', src)
+ code_line += code_string[code_offset:i.start(1)].count('\n')
src = src.decode('string_escape')
- push_translation(module, terms_type, frelativepath, 0, encode(src))
+ push_translation(module, terms_type, frelativepath, code_line, encode(src))
+ code_line += i.group(1).count('\n')
+ code_offset = i.end() # we have counted newlines up to the match end
for path in path_list:
- logger.notifyChannel("i18n", netsvc.LOG_DEBUG, "Scanning files of modules at %s" % path)
+ logger.debug("Scanning files of modules at %s", path)
for root, dummy, files in tools.osutil.walksymlinks(path):
for fname in itertools.chain(fnmatch.filter(files, '*.py')):
export_code_terms_from_file(fname, path, root, 'code')
cr.close()
return out
-def trans_load(db_name, filename, lang, strict=False, verbose=True, context={}):
+def trans_load(db_name, filename, lang, verbose=True, context=None):
logger = logging.getLogger('i18n')
try:
fileobj = open(filename,'r')
logger.info("loading %s", filename)
fileformat = os.path.splitext(filename)[-1][1:].lower()
- r = trans_load_data(db_name, fileobj, fileformat, lang, strict=strict, verbose=verbose, context=context)
+ r = trans_load_data(db_name, fileobj, fileformat, lang, verbose=verbose, context=context)
fileobj.close()
return r
except IOError:
logger.error("couldn't read translation file %s", filename)
return None
-def trans_load_data(db_name, fileobj, fileformat, lang, strict=False, lang_name=None, verbose=True, context={}):
+def trans_load_data(db_name, fileobj, fileformat, lang, lang_name=None, verbose=True, context=None):
logger = logging.getLogger('i18n')
if verbose:
logger.info('loading translation file for language %s', lang)
+ if context is None:
+ context = {}
pool = pooler.get_pool(db_name)
lang_obj = pool.get('res.lang')
trans_obj = pool.get('ir.translation')
dic[f[i]] = row[i]
try:
- dic['res_id'] = int(dic['res_id'])
+ dic['res_id'] = dic['res_id'] and int(dic['res_id']) or 0
except:
model_data_ids = model_data_obj.search(cr, uid, [
('model', '=', dic['name'].split(',')[0]),
else:
dic['res_id'] = False
- if dic['type'] == 'model' and not strict:
- (model, field) = dic['name'].split(',')
-
- # get the ids of the resources of this model which share
- # the same source
- obj = pool.get(model)
- if obj:
- if field not in obj.fields_get_keys(cr, uid):
- continue
- ids = obj.search(cr, uid, [(field, '=', dic['src'])])
-
- # if the resource id (res_id) is in that list, use it,
- # otherwise use the whole list
- if not ids:
- ids = []
- ids = (dic['res_id'] in ids) and [dic['res_id']] or ids
- for id in ids:
- dic['res_id'] = id
- ids = trans_obj.search(cr, uid, [
- ('lang', '=', lang),
- ('type', '=', dic['type']),
- ('name', '=', dic['name']),
- ('src', '=', dic['src']),
- ('res_id', '=', dic['res_id'])
- ])
- if ids:
- if context.get('overwrite', False):
- trans_obj.write(cr, uid, ids, {'value': dic['value']})
- else:
- trans_obj.create(cr, uid, dic)
+ args = [
+ ('lang', '=', lang),
+ ('type', '=', dic['type']),
+ ('name', '=', dic['name']),
+ ('src', '=', dic['src']),
+ ]
+ if dic['type'] == 'model':
+ args.append(('res_id', '=', dic['res_id']))
+ ids = trans_obj.search(cr, uid, args)
+ if ids:
+ if context.get('overwrite'):
+ trans_obj.write(cr, uid, ids, {'value': dic['value']})
else:
- ids = trans_obj.search(cr, uid, [
- ('lang', '=', lang),
- ('type', '=', dic['type']),
- ('name', '=', dic['name']),
- ('src', '=', dic['src'])
- ])
- if ids:
- if context.get('overwrite', False):
- trans_obj.write(cr, uid, ids, {'value': dic['value']})
- else:
- trans_obj.create(cr, uid, dic)
+ trans_obj.create(cr, uid, dic)
cr.commit()
cr.close()
if verbose: