1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
29 import openerp.pooler as pooler
30 import openerp.sql_db as sql_db
36 from os.path import join
38 from datetime import datetime
39 from lxml import etree
43 from misc import UpdateableStr
44 from misc import SKIPPED_ELEMENT_TYPES
47 _logger = logging.getLogger(__name__)
50 'af_ZA': 'Afrikaans_South Africa',
51 'sq_AL': 'Albanian_Albania',
52 'ar_SA': 'Arabic_Saudi Arabia',
53 'eu_ES': 'Basque_Spain',
54 'be_BY': 'Belarusian_Belarus',
55 'bs_BA': 'Serbian (Latin)',
56 'bg_BG': 'Bulgarian_Bulgaria',
57 'ca_ES': 'Catalan_Spain',
58 'hr_HR': 'Croatian_Croatia',
59 'zh_CN': 'Chinese_China',
60 'zh_TW': 'Chinese_Taiwan',
61 'cs_CZ': 'Czech_Czech Republic',
62 'da_DK': 'Danish_Denmark',
63 'nl_NL': 'Dutch_Netherlands',
64 'et_EE': 'Estonian_Estonia',
65 'fa_IR': 'Farsi_Iran',
66 'ph_PH': 'Filipino_Philippines',
67 'fi_FI': 'Finnish_Finland',
68 'fr_FR': 'French_France',
69 'fr_BE': 'French_France',
70 'fr_CH': 'French_France',
71 'fr_CA': 'French_France',
72 'ga': 'Scottish Gaelic',
73 'gl_ES': 'Galician_Spain',
74 'ka_GE': 'Georgian_Georgia',
75 'de_DE': 'German_Germany',
76 'el_GR': 'Greek_Greece',
77 'gu': 'Gujarati_India',
78 'he_IL': 'Hebrew_Israel',
80 'hu': 'Hungarian_Hungary',
81 'is_IS': 'Icelandic_Iceland',
82 'id_ID': 'Indonesian_indonesia',
83 'it_IT': 'Italian_Italy',
84 'ja_JP': 'Japanese_Japan',
87 'ko_KR': 'Korean_Korea',
89 'lt_LT': 'Lithuanian_Lithuania',
90 'lat': 'Latvian_Latvia',
91 'ml_IN': 'Malayalam_India',
92 'id_ID': 'Indonesian_indonesia',
94 'mn': 'Cyrillic_Mongolian',
95 'no_NO': 'Norwegian_Norway',
96 'nn_NO': 'Norwegian-Nynorsk_Norway',
97 'pl': 'Polish_Poland',
98 'pt_PT': 'Portuguese_Portugal',
99 'pt_BR': 'Portuguese_Brazil',
100 'ro_RO': 'Romanian_Romania',
101 'ru_RU': 'Russian_Russia',
103 'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
104 'sk_SK': 'Slovak_Slovakia',
105 'sl_SI': 'Slovenian_Slovenia',
106 # Should find more specific locales for Spanish-speaking countries,
107 # but this is better than nothing.
108 'es_AR': 'Spanish_Spain',
109 'es_BO': 'Spanish_Spain',
110 'es_CL': 'Spanish_Spain',
111 'es_CO': 'Spanish_Spain',
112 'es_CR': 'Spanish_Spain',
113 'es_DO': 'Spanish_Spain',
114 'es_EC': 'Spanish_Spain',
115 'es_ES': 'Spanish_Spain',
116 'es_GT': 'Spanish_Spain',
117 'es_HN': 'Spanish_Spain',
118 'es_MX': 'Spanish_Spain',
119 'es_NI': 'Spanish_Spain',
120 'es_PA': 'Spanish_Spain',
121 'es_PE': 'Spanish_Spain',
122 'es_PR': 'Spanish_Spain',
123 'es_PY': 'Spanish_Spain',
124 'es_SV': 'Spanish_Spain',
125 'es_UY': 'Spanish_Spain',
126 'es_VE': 'Spanish_Spain',
127 'sv_SE': 'Swedish_Sweden',
128 'ta_IN': 'English_Australia',
129 'th_TH': 'Thai_Thailand',
131 'tr_TR': 'Turkish_Turkey',
132 'uk_UA': 'Ukrainian_Ukraine',
133 'vi_VN': 'Vietnamese_Viet Nam',
134 'tlh_TLH': 'Klingon',
class UNIX_LINE_TERMINATOR(csv.excel):
    """csv 'excel' dialect variant that writes Unix ('\\n') line endings
    instead of the default '\\r\\n'."""
    lineterminator = '\n'

# Make the dialect available by name to csv.reader/csv.writer callers.
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
145 # Warning: better use self.pool.get('ir.translation')._get_source if you can
147 def translate(cr, name, source_type, lang, source=None):
149 cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s', (lang, source_type, str(name), source))
151 cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
153 cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s', (lang, source_type, source))
154 res_trans = cr.fetchone()
155 res = res_trans and res_trans[0] or False
158 class GettextAlias(object):
161 # find current DB based on thread/worker db name (see netsvc)
162 db_name = getattr(threading.currentThread(), 'dbname', None)
164 return sql_db.db_connect(db_name)
166 def _get_cr(self, frame, allow_create=True):
168 cr = frame.f_locals.get('cr', frame.f_locals.get('cursor'))
170 s = frame.f_locals.get('self', {})
171 cr = getattr(s, 'cr', None)
172 if not cr and allow_create:
179 def _get_uid(self, frame):
180 return frame.f_locals.get('uid') or frame.f_locals.get('user')
182 def _get_lang(self, frame):
184 ctx = frame.f_locals.get('context')
186 kwargs = frame.f_locals.get('kwargs')
188 args = frame.f_locals.get('args')
189 if args and isinstance(args, (list, tuple)) \
190 and isinstance(args[-1], dict):
192 elif isinstance(kwargs, dict):
193 ctx = kwargs.get('context')
195 lang = ctx.get('lang')
196 s = frame.f_locals.get('self', {})
198 c = getattr(s, 'localcontext', None)
202 # Last resort: attempt to guess the language of the user
203 # Pitfall: some operations are performed in sudo mode, and we
204 # don't know the originial uid, so the language may
205 # be wrong when the admin language differs.
206 pool = getattr(s, 'pool', None)
207 (cr, dummy) = self._get_cr(frame, allow_create=False)
208 uid = self._get_uid(frame)
209 if pool and cr and uid:
210 lang = pool.get('res.users').context_get(cr, uid)['lang']
213 def __call__(self, source):
218 frame = inspect.currentframe()
224 lang = self._get_lang(frame)
226 cr, is_new_cr = self._get_cr(frame)
228 # Try to use ir.translation to benefit from global cache if possible
229 pool = pooler.get_pool(cr.dbname)
230 res = pool.get('ir.translation')._get_source(cr, 1, None, ('code','sql_constraint'), lang, source)
232 _logger.debug('no context cursor detected, skipping translation for "%r"', source)
234 _logger.debug('no translation language detected, skipping translation for "%r" ', source)
236 _logger.debug('translation went wrong for "%r", skipped', source)
237 # if so, double-check the root/base translations filenames
247 """Returns quoted PO term string, with special PO characters escaped"""
248 assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
249 return '"%s"' % s.replace('\\','\\\\') \
250 .replace('"','\\"') \
251 .replace('\n', '\\n"\n"')
253 re_escaped_char = re.compile(r"(\\.)")
254 re_escaped_replacements = {'n': '\n', }
256 def _sub_replacement(match_obj):
257 return re_escaped_replacements.get(match_obj.group(1)[1], match_obj.group(1)[1])
260 """Returns unquoted PO term string, with special PO characters unescaped"""
261 return re_escaped_char.sub(_sub_replacement, str[1:-1])
263 # class to handle po files
264 class TinyPoFile(object):
265 def __init__(self, buffer):
def warn(self, msg, *args):
    """Forward a PO-parsing warning to the module logger.

    *args* are applied lazily by the logging module via %-formatting.
    """
    _logger.warning(msg, *args)
273 self.lines = self._get_lines()
274 self.lines_count = len(self.lines);
280 def _get_lines(self):
281 lines = self.buffer.readlines()
282 # remove the BOM (Byte Order Mark):
284 lines[0] = unicode(lines[0], 'utf8').lstrip(unicode( codecs.BOM_UTF8, "utf8"))
286 lines.append('') # ensure that the file ends with at least an empty line
290 return (self.lines_count - len(self.lines))
293 type = name = res_id = source = trad = None
296 type, name, res_id, source, trad = self.tnrs.pop(0)
304 if 0 == len(self.lines):
305 raise StopIteration()
306 line = self.lines.pop(0).strip()
307 while line.startswith('#'):
308 if line.startswith('#~ '):
310 if line.startswith('#:'):
311 for lpart in line[2:].strip().split(' '):
312 trans_info = lpart.strip().split(':',2)
313 if trans_info and len(trans_info) == 2:
314 # looks like the translation type is missing, which is not
315 # unexpected because it is not a GetText standard. Default: 'code'
316 trans_info[:0] = ['code']
317 if trans_info and len(trans_info) == 3:
318 tmp_tnrs.append(trans_info)
319 elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
321 line = self.lines.pop(0).strip()
323 # allow empty lines between comments and msgid
324 line = self.lines.pop(0).strip()
325 if line.startswith('#~ '):
326 while line.startswith('#~ ') or not line.strip():
327 if 0 == len(self.lines):
328 raise StopIteration()
329 line = self.lines.pop(0)
330 # This has been a deprecated entry, don't return anything
333 if not line.startswith('msgid'):
334 raise Exception("malformed file: bad line: %s" % line)
335 source = unquote(line[6:])
336 line = self.lines.pop(0).strip()
337 if not source and self.first:
338 # if the source is "" and it's the first msgid, it's the special
339 # msgstr with the informations about the traduction and the
340 # traductor; we skip it
343 line = self.lines.pop(0).strip()
346 while not line.startswith('msgstr'):
348 raise Exception('malformed file at %d'% self.cur_line())
349 source += unquote(line)
350 line = self.lines.pop(0).strip()
352 trad = unquote(line[7:])
353 line = self.lines.pop(0).strip()
355 trad += unquote(line)
356 line = self.lines.pop(0).strip()
358 if tmp_tnrs and not fuzzy:
359 type, name, res_id = tmp_tnrs.pop(0)
360 for t, n, r in tmp_tnrs:
361 self.tnrs.append((t, n, r, source, trad))
367 self.warn('Missing "#:" formated comment at line %d for the following source:\n\t%s',
368 self.cur_line(), source[:30])
370 return type, name, res_id, source, trad
372 def write_infos(self, modules):
373 import openerp.release as release
374 self.buffer.write("# Translation of %(project)s.\n" \
375 "# This file contains the translation of the following modules:\n" \
380 '''"Project-Id-Version: %(project)s %(version)s\\n"\n''' \
381 '''"Report-Msgid-Bugs-To: \\n"\n''' \
382 '''"POT-Creation-Date: %(now)s\\n"\n''' \
383 '''"PO-Revision-Date: %(now)s\\n"\n''' \
384 '''"Last-Translator: <>\\n"\n''' \
385 '''"Language-Team: \\n"\n''' \
386 '''"MIME-Version: 1.0\\n"\n''' \
387 '''"Content-Type: text/plain; charset=UTF-8\\n"\n''' \
388 '''"Content-Transfer-Encoding: \\n"\n''' \
389 '''"Plural-Forms: \\n"\n''' \
392 % { 'project': release.description,
393 'version': release.version,
394 'modules': reduce(lambda s, m: s + "#\t* %s\n" % m, modules, ""),
395 'now': datetime.utcnow().strftime('%Y-%m-%d %H:%M')+"+0000",
399 def write(self, modules, tnrs, source, trad):
401 plurial = len(modules) > 1 and 's' or ''
402 self.buffer.write("#. module%s: %s\n" % (plurial, ', '.join(modules)))
406 for typy, name, res_id in tnrs:
407 self.buffer.write("#: %s:%s:%s\n" % (typy, name, res_id))
412 # only strings in python code are python formated
413 self.buffer.write("#, python-format\n")
415 if not isinstance(trad, unicode):
416 trad = unicode(trad, 'utf8')
417 if not isinstance(source, unicode):
418 source = unicode(source, 'utf8')
422 % (quote(source), quote(trad))
423 self.buffer.write(msg.encode('utf8'))
426 # Methods to export the translation file
428 def trans_export(lang, modules, buffer, format, cr):
430 def _process(format, modules, rows, buffer, lang, newlang):
432 writer=csv.writer(buffer, 'UNIX')
437 writer = TinyPoFile(buffer)
438 writer.write_infos(modules)
440 # we now group the translations by source. That means one translation per source.
442 for module, type, name, res_id, src, trad in rows:
443 row = grouped_rows.setdefault(src, {})
444 row.setdefault('modules', set()).add(module)
445 if ('translation' not in row) or (not row['translation']):
447 row['translation'] = trad
448 row.setdefault('tnrs', []).append((type, name, res_id))
450 for src, row in grouped_rows.items():
452 row['translation'] = ''
453 elif not row.get('translation'):
454 row['translation'] = src
455 writer.write(row['modules'], row['tnrs'], src, row['translation'])
457 elif format == 'tgz':
462 # first row is the "header", as in csv, it will be popped
463 rows_by_module.setdefault(module, [['module', 'type', 'name', 'res_id', 'src', ''],])
464 rows_by_module[module].append(row)
466 tmpdir = tempfile.mkdtemp()
467 for mod, modrows in rows_by_module.items():
468 tmpmoddir = join(tmpdir, mod, 'i18n')
469 os.makedirs(tmpmoddir)
470 pofilename = (newlang and mod or lang) + ".po" + (newlang and 't' or '')
471 buf = file(join(tmpmoddir, pofilename), 'w')
472 _process('po', [mod], modrows, buf, lang, newlang)
475 tar = tarfile.open(fileobj=buffer, mode='w|gz')
480 raise Exception(_('Unrecognized extension: must be one of '
481 '.csv, .po, or .tgz (received .%s).' % format))
483 newlang = not bool(lang)
486 trans = trans_generate(lang, modules, cr)
487 if newlang and format!='csv':
490 modules = set([t[0] for t in trans[1:]])
491 _process(format, modules, trans, buffer, lang, newlang)
def trans_parse_xsl(de):
    """Return the unique translatable terms collected from the XSL tree *de*.

    Delegates the actual traversal to trans_parse_xsl_aux and deduplicates
    its result (order is not preserved).
    """
    terms = trans_parse_xsl_aux(de, False)
    return list(set(terms))
497 def trans_parse_xsl_aux(de, t):
503 if isinstance(n, SKIPPED_ELEMENT_TYPES) or n.tag.startswith('{http://www.w3.org/1999/XSL/Transform}'):
506 l = n.text.strip().replace('\n',' ')
508 res.append(l.encode("utf8"))
510 l = n.tail.strip().replace('\n',' ')
512 res.append(l.encode("utf8"))
513 res.extend(trans_parse_xsl_aux(n, t))
516 def trans_parse_rml(de):
520 if isinstance(m, SKIPPED_ELEMENT_TYPES) or not m.text:
522 string_list = [s.replace('\n', ' ').strip() for s in re.split('\[\[.+?\]\]', m.text)]
523 for s in string_list:
525 res.append(s.encode("utf8"))
526 res.extend(trans_parse_rml(n))
529 def trans_parse_view(de):
531 if de.tag == 'attribute' and de.get("name") == 'string':
533 res.append(de.text.encode("utf8"))
535 res.append(de.get('string').encode("utf8"))
537 res.append(de.get('help').encode("utf8"))
539 res.append(de.get('sum').encode("utf8"))
540 if de.get("confirm"):
541 res.append(de.get('confirm').encode("utf8"))
543 res.extend(trans_parse_view(n))
546 # tests whether an object is in a list of modules
547 def in_modules(object_name, modules):
556 module = object_name.split('.')[0]
557 module = module_dict.get(module, module)
558 return module in modules
560 def trans_generate(lang, modules, cr):
563 pool = pooler.get_pool(dbname)
564 trans_obj = pool.get('ir.translation')
565 model_data_obj = pool.get('ir.model.data')
567 l = pool.models.items()
570 query = 'SELECT name, model, res_id, module' \
571 ' FROM ir_model_data'
573 query_models = """SELECT m.id, m.model, imd.module
574 FROM ir_model AS m, ir_model_data AS imd
575 WHERE m.id = imd.res_id AND imd.model = 'ir.model' """
577 if 'all_installed' in modules:
578 query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
579 query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
581 if 'all' not in modules:
582 query += ' WHERE module IN %s'
583 query_models += ' AND imd.module in %s'
584 query_param = (tuple(modules),)
585 query += ' ORDER BY module, model, name'
586 query_models += ' ORDER BY module, model'
588 cr.execute(query, query_param)
def push_translation(module, type, name, id, source):
    """Queue one translatable term in the shared _to_translate list.

    Empty sources and exact duplicate entries are skipped.
    """
    # Fix: the local was previously named 'tuple', shadowing the builtin.
    entry = (module, source, name, id, type)
    # NOTE(review): membership test on a list is O(n) per call; kept as-is
    # to preserve insertion-order semantics of the export.
    if source and entry not in _to_translate:
        _to_translate.append(entry)
597 if isinstance(s, unicode):
598 return s.encode('utf8')
601 for (xml_name,model,res_id,module) in cr.fetchall():
602 module = encode(module)
603 model = encode(model)
604 xml_name = "%s.%s" % (module, encode(xml_name))
606 if not pool.get(model):
607 _logger.error("Unable to find object %r", model)
610 exists = pool.get(model).exists(cr, uid, res_id)
612 _logger.warning("Unable to find object %r with id %d", model, res_id)
614 obj = pool.get(model).browse(cr, uid, res_id)
616 if model=='ir.ui.view':
617 d = etree.XML(encode(obj.arch))
618 for t in trans_parse_view(d):
619 push_translation(module, 'view', encode(obj.model), 0, t)
620 elif model=='ir.actions.wizard':
621 service_name = 'wizard.'+encode(obj.wiz_name)
622 import openerp.netsvc as netsvc
623 if netsvc.Service._services.get(service_name):
624 obj2 = netsvc.Service._services[service_name]
625 for state_name, state_def in obj2.states.iteritems():
626 if 'result' in state_def:
627 result = state_def['result']
628 if result['type'] != 'form':
630 name = "%s,%s" % (encode(obj.wiz_name), state_name)
633 'string': ('wizard_field', lambda s: [encode(s)]),
634 'selection': ('selection', lambda s: [encode(e[1]) for e in ((not callable(s)) and s or [])]),
635 'help': ('help', lambda s: [encode(s)]),
639 if not result.has_key('fields'):
640 _logger.warning("res has no fields: %r", result)
642 for field_name, field_def in result['fields'].iteritems():
643 res_name = name + ',' + field_name
645 for fn in def_params:
647 transtype, modifier = def_params[fn]
648 for val in modifier(field_def[fn]):
649 push_translation(module, transtype, res_name, 0, val)
652 arch = result['arch']
653 if arch and not isinstance(arch, UpdateableStr):
655 for t in trans_parse_view(d):
656 push_translation(module, 'wizard_view', name, 0, t)
658 # export button labels
659 for but_args in result['state']:
660 button_name = but_args[0]
661 button_label = but_args[1]
662 res_name = name + ',' + button_name
663 push_translation(module, 'wizard_button', res_name, 0, button_label)
665 elif model=='ir.model.fields':
667 field_name = encode(obj.name)
668 except AttributeError, exc:
669 _logger.error("name error in %s: %s", xml_name, str(exc))
671 objmodel = pool.get(obj.model)
672 if not objmodel or not field_name in objmodel._columns:
674 field_def = objmodel._columns[field_name]
676 name = "%s,%s" % (encode(obj.model), field_name)
677 push_translation(module, 'field', name, 0, encode(field_def.string))
680 push_translation(module, 'help', name, 0, encode(field_def.help))
682 if field_def.translate:
683 ids = objmodel.search(cr, uid, [])
684 obj_values = objmodel.read(cr, uid, ids, [field_name])
685 for obj_value in obj_values:
686 res_id = obj_value['id']
687 if obj.name in ('ir.model', 'ir.ui.menu'):
689 model_data_ids = model_data_obj.search(cr, uid, [
690 ('model', '=', model),
691 ('res_id', '=', res_id),
693 if not model_data_ids:
694 push_translation(module, 'model', name, 0, encode(obj_value[field_name]))
696 if hasattr(field_def, 'selection') and isinstance(field_def.selection, (list, tuple)):
697 for dummy, val in field_def.selection:
698 push_translation(module, 'selection', name, 0, encode(val))
700 elif model=='ir.actions.report.xml':
701 name = encode(obj.report_name)
704 fname = obj.report_rml
705 parse_func = trans_parse_rml
706 report_type = "report"
708 fname = obj.report_xsl
709 parse_func = trans_parse_xsl
711 if fname and obj.report_type in ('pdf', 'xsl'):
713 report_file = misc.file_open(fname)
715 d = etree.parse(report_file)
716 for t in parse_func(d.iter()):
717 push_translation(module, report_type, name, 0, t)
720 except (IOError, etree.XMLSyntaxError):
721 _logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
723 for field_name,field_def in obj._table._columns.items():
724 if field_def.translate:
725 name = model + "," + field_name
727 trad = getattr(obj, field_name) or ''
730 push_translation(module, 'model', name, xml_name, encode(trad))
732 # End of data for ir.model.data query results
734 cr.execute(query_models, query_param)
def push_constraint_msg(module, term_type, model, msg):
    """Export a constraint message as a translatable term.

    Callable messages are produced dynamically and cannot be exported.
    """
    # hasattr('__call__') is checked directly instead of using callable(),
    # which was scheduled for removal in early Python 3 releases.
    if hasattr(msg, '__call__'):
        return
    push_translation(module, term_type, model, 0, encode(msg))
742 for (model_id, model, module) in cr.fetchall():
743 module = encode(module)
744 model = encode(model)
746 model_obj = pool.get(model)
749 _logger.error("Unable to find object %r", model)
752 for constraint in getattr(model_obj, '_constraints', []):
753 push_constraint_msg(module, 'constraint', model, constraint[1])
755 for constraint in getattr(model_obj, '_sql_constraints', []):
756 push_constraint_msg(module, 'sql_constraint', model, constraint[2])
758 # parse source code for _() calls
759 def get_module_from_path(path, mod_paths=None):
761 # First, construct a list of possible paths
762 def_path = os.path.abspath(os.path.join(config.config['root_path'], 'addons')) # default addons path (base)
763 ad_paths= map(lambda m: os.path.abspath(m.strip()),config.config['addons_path'].split(','))
766 mod_paths.append(adp)
767 if not os.path.isabs(adp):
768 mod_paths.append(adp)
769 elif adp.startswith(def_path):
770 mod_paths.append(adp[len(def_path)+1:])
772 if path.startswith(mp) and (os.path.dirname(path) != mp):
773 path = path[len(mp)+1:]
774 return path.split(os.path.sep)[0]
775 return 'base' # files that are not in a module are considered as being in 'base' module
777 modobj = pool.get('ir.module.module')
778 installed_modids = modobj.search(cr, uid, [('state', '=', 'installed')])
779 installed_modules = map(lambda m: m['name'], modobj.read(cr, uid, installed_modids, ['name']))
781 root_path = os.path.join(config.config['root_path'], 'addons')
783 apaths = map(os.path.abspath, map(str.strip, config.config['addons_path'].split(',')))
784 if root_path in apaths:
787 path_list = [root_path,] + apaths
789 # Also scan these non-addon paths
790 for bin_path in ['osv', 'report' ]:
791 path_list.append(os.path.join(config.config['root_path'], bin_path))
793 _logger.debug("Scanning modules at paths: ", path_list)
796 join_dquotes = re.compile(r'([^\\])"[\s\\]*"', re.DOTALL)
797 join_quotes = re.compile(r'([^\\])\'[\s\\]*\'', re.DOTALL)
798 re_dquotes = re.compile(r'[^a-zA-Z0-9_]_\([\s]*"(.+?)"[\s]*?\)', re.DOTALL)
799 re_quotes = re.compile(r'[^a-zA-Z0-9_]_\([\s]*\'(.+?)\'[\s]*?\)', re.DOTALL)
801 def export_code_terms_from_file(fname, path, root, terms_type):
802 fabsolutepath = join(root, fname)
803 frelativepath = fabsolutepath[len(path):]
804 module = get_module_from_path(fabsolutepath, mod_paths=mod_paths)
805 is_mod_installed = module in installed_modules
806 if (('all' in modules) or (module in modules)) and is_mod_installed:
807 _logger.debug("Scanning code of %s at module: %s", frelativepath, module)
808 src_file = misc.file_open(fabsolutepath, subdir='')
810 code_string = src_file.read()
813 if module in installed_modules:
814 frelativepath = str("addons" + frelativepath)
815 ite = re_dquotes.finditer(code_string)
820 if src.startswith('""'):
821 assert src.endswith('""'), "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s" % (frelativepath, src[:30])
824 src = join_dquotes.sub(r'\1', src)
825 # try to count the lines from the last pos to our place:
826 code_line += code_string[code_offset:i.start(1)].count('\n')
827 # now, since we did a binary read of a python source file, we
828 # have to expand pythonic escapes like the interpreter does.
829 src = src.decode('string_escape')
830 push_translation(module, terms_type, frelativepath, code_line, encode(src))
831 code_line += i.group(1).count('\n')
832 code_offset = i.end() # we have counted newlines up to the match end
834 ite = re_quotes.finditer(code_string)
835 code_offset = 0 #reset counters
839 if src.startswith("''"):
840 assert src.endswith("''"), "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s" % (frelativepath, src[:30])
843 src = join_quotes.sub(r'\1', src)
844 code_line += code_string[code_offset:i.start(1)].count('\n')
845 src = src.decode('string_escape')
846 push_translation(module, terms_type, frelativepath, code_line, encode(src))
847 code_line += i.group(1).count('\n')
848 code_offset = i.end() # we have counted newlines up to the match end
850 for path in path_list:
851 _logger.debug("Scanning files of modules at %s", path)
852 for root, dummy, files in osutil.walksymlinks(path):
853 for fname in itertools.chain(fnmatch.filter(files, '*.py')):
854 export_code_terms_from_file(fname, path, root, 'code')
855 for fname in itertools.chain(fnmatch.filter(files, '*.mako')):
856 export_code_terms_from_file(fname, path, root, 'report')
859 out = [["module","type","name","res_id","src","value"]] # header
861 # translate strings marked as to be translated
862 for module, source, name, id, type in _to_translate:
863 trans = trans_obj._get_source(cr, uid, name, type, lang, source)
864 out.append([module, type, name, id, source, encode(trans) or ''])
868 def trans_load(cr, filename, lang, verbose=True, context=None):
870 fileobj = misc.file_open(filename)
871 _logger.info("loading %s", filename)
872 fileformat = os.path.splitext(filename)[-1][1:].lower()
873 r = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, context=context)
878 _logger.error("couldn't read translation file %s", filename)
881 def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, context=None):
882 """Populates the ir_translation table."""
884 _logger.info('loading translation file for language %s', lang)
888 pool = pooler.get_pool(db_name)
889 lang_obj = pool.get('res.lang')
890 trans_obj = pool.get('ir.translation')
891 iso_lang = misc.get_iso_codes(lang)
894 ids = lang_obj.search(cr, uid, [('code','=', lang)])
897 # lets create the language with locale information
898 lang_obj.load_lang(cr, 1, lang=lang, lang_name=lang_name)
901 # now, the serious things: we read the language file
903 if fileformat == 'csv':
904 reader = csv.reader(fileobj, quotechar='"', delimiter=',')
905 # read the first line of the file (it contains columns titles)
909 elif fileformat == 'po':
910 reader = TinyPoFile(fileobj)
911 f = ['type', 'name', 'res_id', 'src', 'value']
913 _logger.error('Bad file format: %s', fileformat)
914 raise Exception(_('Bad file format'))
916 # read the rest of the file
918 irt_cursor = trans_obj._get_import_cursor(cr, uid, context=context)
922 # skip empty rows and rows where the translation field (=last fiefd) is empty
923 #if (not row) or (not row[-1]):
926 # dictionary which holds values for this line of the csv file
927 # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
928 # 'src': ..., 'value': ...}
931 for i in range(len(f)):
932 if f[i] in ('module',):
936 # This would skip terms that fail to specify a res_id
937 if not dic.get('res_id', False):
940 res_id = dic.pop('res_id')
941 if res_id and isinstance(res_id, (int, long)) \
942 or (isinstance(res_id, basestring) and res_id.isdigit()):
943 dic['res_id'] = int(res_id)
946 tmodel = dic['name'].split(',')[0]
948 tmodule, tname = res_id.split('.', 1)
952 dic['imd_model'] = tmodel
953 dic['imd_module'] = tmodule
954 dic['imd_name'] = tname
958 _logger.warning("Could not decode resource for %s, please fix the po file.",
959 dic['res_id'], exc_info=True)
966 _logger.info("translation file loaded succesfully")
968 filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
969 _logger.exception("couldn't read translation file %s", filename)
971 def get_locales(lang=None):
973 lang = locale.getdefaultlocale()[0]
976 lang = _LOCALE2WIN32.get(lang, lang)
979 ln = locale._build_localename((lang, enc))
981 nln = locale.normalize(ln)
985 for x in process('utf8'): yield x
987 prefenc = locale.getpreferredencoding()
989 for x in process(prefenc): yield x
993 'iso-8859-1': 'iso8859-15',
995 }.get(prefenc.lower())
997 for x in process(prefenc): yield x
1004 # locale.resetlocale is bugged with some locales.
1005 for ln in get_locales():
1007 return locale.setlocale(locale.LC_ALL, ln)
1008 except locale.Error:
1011 def load_language(cr, lang):
1012 """Loads a translation terms for a language.
1013 Used mainly to automate language loading at db initialization.
1015 :param lang: language ISO code with optional _underscore_ and l10n flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
1018 pool = pooler.get_pool(cr.dbname)
1019 language_installer = pool.get('base.language.install')
1021 oid = language_installer.create(cr, uid, {'lang': lang})
1022 language_installer.lang_install(cr, uid, [oid], context=None)
1024 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: