X-Git-Url: http://git.inspyration.org/?a=blobdiff_plain;f=bin%2Ftools%2Fmisc.py;h=b45f3881c6501e0484ac4fe976f403614a01b438;hb=896c74fa23dd6fee2f8f81d89ffb8227975b0e6b;hp=e3a96772b83078a9f3a944c36b187aa6ceec77fa;hpb=02e18c4262b2d4edf44b1fabf997891ba48e7e18;p=odoo%2Fodoo.git diff --git a/bin/tools/misc.py b/bin/tools/misc.py index e3a9677..b45f388 100644 --- a/bin/tools/misc.py +++ b/bin/tools/misc.py @@ -1,21 +1,21 @@ -# -*- encoding: utf-8 -*- +# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution -# Copyright (C) 2004-2009 Tiny SPRL (). All Rights Reserved -# $Id$ +# Copyright (C) 2004-2009 Tiny SPRL (). +# Copyright (C) 2010 OpenERP s.a. (). # # This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# GNU Affero General Public License for more details. # -# You should have received a copy of the GNU General Public License +# You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # ############################################################################## @@ -24,135 +24,161 @@ Miscelleanous tools used by OpenERP. """ -import os, time, sys import inspect - -from config import config - -import zipfile -import release +import subprocess +import logging +import os +import re +import smtplib import socket - +import sys +import threading +import time +import warnings +import zipfile +from datetime import datetime +from email.MIMEText import MIMEText +from email.MIMEBase import MIMEBase +from email.MIMEMultipart import MIMEMultipart +from email.Header import Header +from email.Utils import formatdate, COMMASPACE +from email import Encoders +from itertools import islice, izip +from lxml import etree +from which import which if sys.version_info[:2] < (2, 4): from threadinglocal import local else: from threading import local +try: + from html2text import html2text +except ImportError: + html2text = None -from itertools import izip +import netsvc +from config import config +from lru import LRU + +_logger = logging.getLogger('tools') + +# List of etree._Element subclasses that we choose to ignore when parsing XML. +# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones. 
+SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase) # initialize a database with base/base.sql def init_db(cr): import addons f = addons.get_module_resource('base', 'base.sql') - for line in file_open(f).read().split(';'): - if (len(line)>0) and (not line.isspace()): - cr.execute(line) - cr.commit() + base_sql_file = file_open(f) + try: + cr.execute(base_sql_file.read()) + cr.commit() + finally: + base_sql_file.close() for i in addons.get_modules(): - terp_file = addons.get_module_resource(i, '__terp__.py') mod_path = addons.get_module_path(i) if not mod_path: continue - info = False - if os.path.isfile(terp_file) or os.path.isfile(mod_path+'.zip'): - info = eval(file_open(terp_file).read()) - if info: - categs = info.get('category', 'Uncategorized').split('/') - p_id = None - while categs: - if p_id is not None: - cr.execute('select id \ - from ir_module_category \ - where name=%s and parent_id=%s', (categs[0], p_id)) - else: - cr.execute('select id \ - from ir_module_category \ - where name=%s and parent_id is NULL', (categs[0],)) - c_id = cr.fetchone() - if not c_id: - cr.execute('select nextval(\'ir_module_category_id_seq\')') - c_id = cr.fetchone()[0] - cr.execute('insert into ir_module_category \ - (id, name, parent_id) \ - values (%s, %s, %s)', (c_id, categs[0], p_id)) - else: - c_id = c_id[0] - p_id = c_id - categs = categs[1:] - - active = info.get('active', False) - installable = info.get('installable', True) - if installable: - if active: - state = 'to install' - else: - state = 'uninstalled' + + info = addons.load_information_from_description_file(i) + + if not info: + continue + categs = info.get('category', 'Uncategorized').split('/') + p_id = None + while categs: + if p_id is not None: + cr.execute('SELECT id \ + FROM ir_module_category \ + WHERE name=%s AND parent_id=%s', (categs[0], p_id)) + else: + cr.execute('SELECT id \ + FROM ir_module_category \ + WHERE name=%s AND parent_id IS NULL', (categs[0],)) + c_id = cr.fetchone() + if not c_id: + cr.execute('INSERT INTO ir_module_category \ + (name, parent_id) \ + VALUES (%s, %s) RETURNING id', (categs[0], p_id)) + c_id = cr.fetchone()[0] + else: + c_id = c_id[0] + p_id = c_id + categs = categs[1:] + + active = info.get('active', False) + installable = info.get('installable', True) + if installable: + if active: + state = 'to install' else: - state = 'uninstallable' - cr.execute('select nextval(\'ir_module_module_id_seq\')') - id = cr.fetchone()[0] - cr.execute('insert into ir_module_module \ - (id, author, website, name, shortdesc, description, \ - category_id, state, certificate) \ - values (%s, %s, %s, %s, %s, %s, %s, %s, %s)', ( - id, info.get('author', ''), - info.get('website', ''), i, info.get('name', False), - info.get('description', ''), p_id, state, info.get('certificate'))) - cr.execute('insert into ir_model_data \ - (name,model,module, res_id, noupdate) values (%s,%s,%s,%s,%s)', ( - 'module_meta_information', 'ir.module.module', i, id, True)) - dependencies = info.get('depends', []) - for d in dependencies: - cr.execute('insert into ir_module_module_dependency \ - (module_id,name) values (%s, %s)', (id, d)) - cr.commit() + state = 'uninstalled' + else: + state = 'uninstallable' + cr.execute('INSERT INTO ir_module_module \ + (author, website, name, shortdesc, description, \ + category_id, state, certificate, web, license) \ + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING id', ( + info.get('author', ''), + info.get('website', ''), i, info.get('name', 
False), + info.get('description', ''), p_id, state, info.get('certificate') or None, + info.get('web') or False, + info.get('license') or 'AGPL-3')) + id = cr.fetchone()[0] + cr.execute('INSERT INTO ir_model_data \ + (name,model,module, res_id, noupdate) VALUES (%s,%s,%s,%s,%s)', ( + 'module_meta_information', 'ir.module.module', i, id, True)) + dependencies = info.get('depends', []) + for d in dependencies: + cr.execute('INSERT INTO ir_module_module_dependency \ + (module_id,name) VALUES (%s, %s)', (id, d)) + cr.commit() def find_in_path(name): - if os.name == "nt": - sep = ';' - else: - sep = ':' - path = [dir for dir in os.environ['PATH'].split(sep) - if os.path.isdir(dir)] - for dir in path: - val = os.path.join(dir, name) - if os.path.isfile(val) or os.path.islink(val): - return val - return None + try: + return which(name) + except IOError: + return None def find_pg_tool(name): + path = None if config['pg_path'] and config['pg_path'] != 'None': - return os.path.join(config['pg_path'], name) - else: - return find_in_path(name) + path = config['pg_path'] + try: + return which(name, path=path) + except IOError: + return None def exec_pg_command(name, *args): prog = find_pg_tool(name) if not prog: raise Exception('Couldn\'t find %s' % name) - args2 = (os.path.basename(prog),) + args - return os.spawnv(os.P_WAIT, prog, args2) + args2 = (prog,) + args + + return subprocess.call(args2) def exec_pg_command_pipe(name, *args): prog = find_pg_tool(name) if not prog: raise Exception('Couldn\'t find %s' % name) - if os.name == "nt": - cmd = '"' + prog + '" ' + ' '.join(args) - else: - cmd = prog + ' ' + ' '.join(args) - return os.popen2(cmd, 'b') + # on win32, passing close_fds=True is not compatible + # with redirecting std[in/err/out] + pop = subprocess.Popen((prog,) + args, bufsize= -1, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + close_fds=(os.name=="posix")) + return (pop.stdin, pop.stdout) def exec_command_pipe(name, *args): prog = find_in_path(name) if not prog: raise Exception('Couldn\'t find %s' % name) - if os.name == "nt": - cmd = '"'+prog+'" '+' '.join(args) - else: - cmd = prog+' '+' '.join(args) - return os.popen2(cmd, 'b') + # on win32, passing close_fds=True is not compatible + # with redirecting std[in/err/out] + pop = subprocess.Popen((prog,) + args, bufsize= -1, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + close_fds=(os.name=="posix")) + return (pop.stdin, pop.stdout) #---------------------------------------------------------- # File paths @@ -174,8 +200,8 @@ def file_open(name, mode="r", subdir='addons', pathinfo=False): @return: fileobject if pathinfo is False else (fileobject, filepath) """ - - adp = os.path.normcase(os.path.abspath(config['addons_path'])) + import addons + adps = addons.ad_paths rtp = os.path.normcase(os.path.abspath(config['root_path'])) if name.replace(os.path.sep, '/').startswith('addons/'): @@ -190,18 +216,19 @@ def file_open(name, mode="r", subdir='addons', pathinfo=False): subdir2 = (subdir2 != 'addons' or None) and subdir2 - try: - if subdir2: - fn = os.path.join(adp, subdir2, name) - else: - fn = os.path.join(adp, name) - fn = os.path.normpath(fn) - fo = file_open(fn, mode=mode, subdir=None, pathinfo=pathinfo) - if pathinfo: - return fo, fn - return fo - except IOError, e: - pass + for adp in adps: + try: + if subdir2: + fn = os.path.join(adp, subdir2, name) + else: + fn = os.path.join(adp, name) + fn = os.path.normpath(fn) + fo = file_open(fn, mode=mode, subdir=None, pathinfo=pathinfo) + if pathinfo: + return fo, fn + return fo + except 
IOError: + pass if subdir: name = os.path.join(rtp, subdir, name) @@ -234,7 +261,7 @@ def file_open(name, mode="r", subdir='addons', pathinfo=False): if pathinfo: return fo, name return fo - except: + except Exception: name2 = os.path.normpath(os.path.join(head + '.zip', zipname)) pass for i in (name2, name): @@ -245,7 +272,7 @@ def file_open(name, mode="r", subdir='addons', pathinfo=False): return fo if os.path.splitext(name)[1] == '.rml': raise IOError, 'Report %s doesn\'t exist or deleted : ' %str(name) - raise IOError, 'File not found : '+str(name) + raise IOError, 'File not found : %s' % name #---------------------------------------------------------- @@ -302,80 +329,109 @@ def reverse_enumerate(l): #---------------------------------------------------------- # Emails #---------------------------------------------------------- -def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False, - attach=None, tinycrm=False, ssl=False, debug=False, subtype='plain', x_headers=None): - - """Send an email.""" - import smtplib - from email.MIMEText import MIMEText - from email.MIMEBase import MIMEBase - from email.MIMEMultipart import MIMEMultipart - from email.Header import Header - from email.Utils import formatdate, COMMASPACE - from email.Utils import formatdate, COMMASPACE - from email import Encoders - import netsvc - - if x_headers is None: - x_headers = {} - - if not ssl: - ssl = config.get('smtp_ssl', False) - - if not email_from and not config['email_from']: - raise Exception("No Email sender by default, see config file") +email_re = re.compile(r""" + ([a-zA-Z][\w\.-]*[a-zA-Z0-9] # username part + @ # mandatory @ sign + [a-zA-Z0-9][\w\.-]* # domain must start with a letter ... Ged> why do we include a 0-9 then? + \. + [a-z]{2,3} # TLD + ) + """, re.VERBOSE) +res_re = re.compile(r"\[([0-9]+)\]", re.UNICODE) +command_re = re.compile("^Set-([a-z]+) *: *(.+)$", re.I + re.UNICODE) +reference_re = re.compile("<.*-openobject-(\\d+)@(.*)>", re.UNICODE) + +priorities = { + '1': '1 (Highest)', + '2': '2 (High)', + '3': '3 (Normal)', + '4': '4 (Low)', + '5': '5 (Lowest)', + } - if not email_cc: - email_cc = [] - if not email_bcc: - email_bcc = [] +def html2plaintext(html, body_id=None, encoding='utf-8'): + ## (c) Fry-IT, www.fry-it.com, 2007 + ## + ## download here: http://www.peterbe.com/plog/html2plaintext - if not attach: - try: - msg = MIMEText(body.encode('utf8') or '',_subtype=subtype,_charset='utf-8') - except: - msg = MIMEText(body or '',_subtype=subtype,_charset='utf-8') - else: - msg = MIMEMultipart() - msg['Subject'] = Header(ustr(subject), 'utf-8') - msg['From'] = email_from - del msg['Reply-To'] - if reply_to: - msg['Reply-To'] = reply_to - else: - msg['Reply-To'] = msg['From'] - msg['To'] = COMMASPACE.join(email_to) - if email_cc: - msg['Cc'] = COMMASPACE.join(email_cc) - if email_bcc: - msg['Bcc'] = COMMASPACE.join(email_bcc) - msg['Date'] = formatdate(localtime=True) + """ from an HTML text, convert the HTML to plain text. + If @body_id is provided then this is the tag where the + body (not necessarily ) starts. 
+ """ - # Add OpenERP Server information - msg['X-Generated-By'] = 'OpenERP (http://www.openerp.com)' - msg['X-OpenERP-Server-Host'] = socket.gethostname() - msg['X-OpenERP-Server-Version'] = release.version + html = ustr(html) - # Add dynamic X Header - for key, value in x_headers.items(): - msg['X-OpenERP-%s' % key] = str(value) + from lxml.etree import tostring + try: + from lxml.html.soupparser import fromstring + kwargs = {} + except ImportError: + _logger.debug('tools.misc.html2plaintext: cannot use BeautifulSoup, fallback to lxml.etree.HTMLParser') + from lxml.etree import fromstring, HTMLParser + kwargs = dict(parser=HTMLParser()) - if tinycrm: - msg['Message-Id'] = "<%s-tinycrm-%s@%s>" % (time.time(), tinycrm, socket.gethostname()) + tree = fromstring(html, **kwargs) - if attach: - try: - msg.attach(MIMEText(body.encode('utf8') or '',_subtype=subtype,_charset='utf-8')) - except: - msg.attach(MIMEText(body or '', _charset='utf-8', _subtype=subtype) ) - for (fname,fcontent) in attach: - part = MIMEBase('application', "octet-stream") - part.set_payload( fcontent ) - Encoders.encode_base64(part) - part.add_header('Content-Disposition', 'attachment; filename="%s"' % (fname,)) - msg.attach(part) - + if body_id is not None: + source = tree.xpath('//*[@id=%s]'%(body_id,)) + else: + source = tree.xpath('//body') + if len(source): + tree = source[0] + + url_index = [] + i = 0 + for link in tree.findall('.//a'): + url = link.get('href') + if url: + i += 1 + link.tag = 'span' + link.text = '%s [%s]' % (link.text, i) + url_index.append(url) + + html = ustr(tostring(tree, encoding=encoding)) + + html = html.replace('','*').replace('','*') + html = html.replace('','*').replace('','*') + html = html.replace('

','*').replace('

','*') + html = html.replace('

','**').replace('

','**') + html = html.replace('

','**').replace('

','**') + html = html.replace('','/').replace('','/') + html = html.replace('', '\n') + html = html.replace('

', '\n') + html = re.sub('', '\n', html) + html = re.sub('<.*?>', ' ', html) + html = html.replace(' ' * 2, ' ') + + # strip all lines + html = '\n'.join([x.strip() for x in html.splitlines()]) + html = html.replace('\n' * 2, '\n') + + for i, url in enumerate(url_index): + if i == 0: + html += '\n\n' + html += ustr('[%s] %s\n') % (i+1, url) + + return html + +def generate_tracking_message_id(openobject_id): + """Returns a string that can be used in the Message-ID RFC822 header field so we + can track the replies related to a given object thanks to the "In-Reply-To" or + "References" fields that Mail User Agents will set. + """ + return "<%s-openobject-%s@%s>" % (time.time(), openobject_id, socket.gethostname()) + +def _email_send(smtp_from, smtp_to_list, message, openobject_id=None, ssl=False, debug=False): + """Low-level method to send directly a Message through the configured smtp server. + :param smtp_from: RFC-822 envelope FROM (not displayed to recipient) + :param smtp_to_list: RFC-822 envelope RCPT_TOs (not displayed to recipient) + :param message: an email.message.Message to send + :param debug: True if messages should be output to stderr before being sent, + and smtplib.SMTP put into debug mode. + :return: True if the mail was delivered successfully to the smtp, + else False (+ exception logged) + """ class WriteToLogger(object): def __init__(self): self.logger = netsvc.Logger() @@ -383,18 +439,29 @@ def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=Non def write(self, s): self.logger.notifyChannel('email_send', netsvc.LOG_DEBUG, s) + if openobject_id: + message['Message-Id'] = generate_tracking_message_id(openobject_id) + try: + smtp_server = config['smtp_server'] + + if smtp_server.startswith('maildir:/'): + from mailbox import Maildir + maildir_path = smtp_server[8:] + mdir = Maildir(maildir_path,factory=None, create = True) + mdir.add(message.as_string(True)) + return True + oldstderr = smtplib.stderr + if not ssl: ssl = config.get('smtp_ssl', False) s = smtplib.SMTP() - try: # in case of debug, the messages are printed to stderr. if debug: smtplib.stderr = WriteToLogger() s.set_debuglevel(int(bool(debug))) # 0 or 1 - - s.connect(config['smtp_server'], config['smtp_port']) + s.connect(smtp_server, config['smtp_port']) if ssl: s.ehlo() s.starttls() @@ -403,22 +470,93 @@ def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=Non if config['smtp_user'] or config['smtp_password']: s.login(config['smtp_user'], config['smtp_password']) - s.sendmail(email_from, - flatten([email_to, email_cc, email_bcc]), - msg.as_string() - ) - + s.sendmail(smtp_from, smtp_to_list, message.as_string()) finally: - s.quit() - if debug: - smtplib.stderr = oldstderr + try: + s.quit() + if debug: + smtplib.stderr = oldstderr + except Exception: + # ignored, just a consequence of the previous exception + pass - except Exception, e: - netsvc.Logger().notifyChannel('email_send', netsvc.LOG_ERROR, e) + except Exception: + _logger.error('could not deliver email', exc_info=True) return False - + return True + +def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False, + attach=None, openobject_id=False, ssl=False, debug=False, subtype='plain', x_headers=None, priority='3'): + + """Send an email. + + Arguments: + + `email_from`: A string used to fill the `From` header, if falsy, + config['email_from'] is used instead. 
Also used for + the `Reply-To` header if `reply_to` is not provided + + `email_to`: a sequence of addresses to send the mail to. + """ + if x_headers is None: + x_headers = {} + + + if not (email_from or config['email_from']): + raise ValueError("Sending an email requires either providing a sender " + "address or having configured one") + + if not email_from: email_from = config.get('email_from', False) + email_from = ustr(email_from).encode('utf-8') + + if not email_cc: email_cc = [] + if not email_bcc: email_bcc = [] + if not body: body = u'' + + email_body = ustr(body).encode('utf-8') + email_text = MIMEText(email_body or '',_subtype=subtype,_charset='utf-8') + + msg = MIMEMultipart() + + msg['Subject'] = Header(ustr(subject), 'utf-8') + msg['From'] = email_from + del msg['Reply-To'] + if reply_to: + msg['Reply-To'] = reply_to + else: + msg['Reply-To'] = msg['From'] + msg['To'] = COMMASPACE.join(email_to) + if email_cc: + msg['Cc'] = COMMASPACE.join(email_cc) + msg['Date'] = formatdate(localtime=True) + + msg['X-Priority'] = priorities.get(priority, '3 (Normal)') + + # Add dynamic X Header + for key, value in x_headers.iteritems(): + msg['%s' % key] = str(value) + + if html2text and subtype == 'html': + text = html2text(email_body.decode('utf-8')).encode('utf-8') + alternative_part = MIMEMultipart(_subtype="alternative") + alternative_part.attach(MIMEText(text, _charset='utf-8', _subtype='plain')) + alternative_part.attach(email_text) + msg.attach(alternative_part) + else: + msg.attach(email_text) + + if attach: + for (fname,fcontent) in attach: + part = MIMEBase('application', "octet-stream") + part.set_payload( fcontent ) + Encoders.encode_base64(part) + part.add_header('Content-Disposition', 'attachment; filename="%s"' % (fname,)) + msg.attach(part) + + return _email_send(email_from, flatten([email_to, email_cc, email_bcc]), msg, openobject_id=openobject_id, ssl=ssl, debug=debug) + #---------------------------------------------------------- # SMS #---------------------------------------------------------- @@ -428,7 +566,7 @@ def sms_send(user, password, api_id, text, to): url = "http://api.urlsms.com/SendSMS.aspx" #url = "http://196.7.150.220/http/sendmsg" params = urllib.urlencode({'UserID': user, 'Password': password, 'SenderID': api_id, 'MsgText': text, 'RecipientMobileNo':to}) - f = urllib.urlopen(url+"?"+params) + urllib.urlopen(url+"?"+params) # FIXME: Use the logger if there is an error return True @@ -578,10 +716,10 @@ class cache(object): Use it as a decorator of the function you plan to cache Timeout: 0 = no timeout, otherwise in seconds """ - + __caches = [] - - def __init__(self, timeout=None, skiparg=2, multi=None): + + def __init__(self, timeout=None, skiparg=2, multi=None, size=8192): assert skiparg >= 2 # at least self and cr if timeout is None: self.timeout = config['cache_timeout'] @@ -590,16 +728,16 @@ class cache(object): self.skiparg = skiparg self.multi = multi self.lasttime = time.time() - self.cache = {} - self.fun = None + self.cache = LRU(size) # TODO take size from config + self.fun = None cache.__caches.append(self) - + def _generate_keys(self, dbname, kwargs2): """ Generate keys depending of the arguments and the self.mutli value """ - + def to_tuple(d): pairs = d.items() pairs.sort(key=lambda (k,v): k) @@ -616,32 +754,32 @@ class cache(object): key = (('dbname', dbname),) + to_tuple(kwargs2) yield key, None else: - multis = kwargs2[self.multi][:] + multis = kwargs2[self.multi][:] for id in multis: kwargs2[self.multi] = (id,) key = (('dbname', dbname),) + 
to_tuple(kwargs2) yield key, id - + def _unify_args(self, *args, **kwargs): # Update named arguments with positional argument values (without self and cr) kwargs2 = self.fun_default_values.copy() kwargs2.update(kwargs) kwargs2.update(dict(zip(self.fun_arg_names, args[self.skiparg-2:]))) return kwargs2 - + def clear(self, dbname, *args, **kwargs): """clear the cache for database dbname if *args and **kwargs are both empty, clear all the keys related to this database """ if not args and not kwargs: - keys_to_del = [key for key in self.cache if key[0][1] == dbname] + keys_to_del = [key for key in self.cache.keys() if key[0][1] == dbname] else: kwargs2 = self._unify_args(*args, **kwargs) - keys_to_del = [key for key, _ in self._generate_keys(dbname, kwargs2) if key in self.cache] - + keys_to_del = [key for key, _ in self._generate_keys(dbname, kwargs2) if key in self.cache.keys()] + for key in keys_to_del: - del self.cache[key] - + self.cache.pop(key) + @classmethod def clean_caches_for_db(cls, dbname): for c in cls.__caches: @@ -657,14 +795,14 @@ class cache(object): self.fun_default_values = {} if argspec[3]: self.fun_default_values = dict(zip(self.fun_arg_names[-len(argspec[3]):], argspec[3])) - + def cached_result(self2, cr, *args, **kwargs): if time.time()-int(self.timeout) > self.lasttime: self.lasttime = time.time() - t = time.time()-int(self.timeout) - old_keys = [key for key in self.cache if self.cache[key][1] < t] + t = time.time()-int(self.timeout) + old_keys = [key for key in self.cache.keys() if self.cache[key][1] < t] for key in old_keys: - del self.cache[key] + self.cache.pop(key) kwargs2 = self._unify_args(*args, **kwargs) @@ -675,11 +813,11 @@ class cache(object): result[id] = self.cache[key][0] else: notincache[id] = key - + if notincache: if self.multi: kwargs2[self.multi] = notincache.keys() - + result2 = fn(self2, cr, *args[:self.skiparg-2], **kwargs2) if not self.multi: key = notincache[None] @@ -690,7 +828,7 @@ class cache(object): key = notincache[id] self.cache[key] = (result2[id], time.time()) result.update(result2) - + if not self.multi: return result[None] return result @@ -701,48 +839,72 @@ class cache(object): def to_xml(s): return s.replace('&','&').replace('<','<').replace('>','>') -def ustr(value): +def get_encodings(hint_encoding='utf-8'): + fallbacks = { + 'latin1': 'latin9', + 'iso-8859-1': 'iso8859-15', + 'cp1252': '1252', + } + if hint_encoding: + yield hint_encoding + if hint_encoding.lower() in fallbacks: + yield fallbacks[hint_encoding.lower()] + + # some defaults (also taking care of pure ASCII) + for charset in ['utf8','latin1']: + if not (hint_encoding) or (charset.lower() != hint_encoding.lower()): + yield charset + + from locale import getpreferredencoding + prefenc = getpreferredencoding() + if prefenc and prefenc.lower() != 'utf-8': + yield prefenc + prefenc = fallbacks.get(prefenc.lower()) + if prefenc: + yield prefenc + + +def ustr(value, hint_encoding='utf-8'): """This method is similar to the builtin `str` method, except - it will return Unicode string. + it will return unicode() string. @param value: the value to convert + @param hint_encoding: an optional encoding that was detected + upstream and should be tried first to + decode ``value``. 
@rtype: unicode @return: unicode string """ + if isinstance(value, Exception): + return exception_to_unicode(value) if isinstance(value, unicode): return value - if hasattr(value, '__unicode__'): - return unicode(value) - - if not isinstance(value, str): - value = str(value) - - try: # first try utf-8 - return unicode(value, 'utf-8') - except: - pass + if not isinstance(value, basestring): + try: + return unicode(value) + except Exception: + raise UnicodeError('unable to convert %r' % (value,)) - try: # then extened iso-8858 - return unicode(value, 'iso-8859-15') - except: - pass + for ln in get_encodings(hint_encoding): + try: + return unicode(value, ln) + except Exception: + pass + raise UnicodeError('unable to convert %r' % (value,)) - # else use default system locale - from locale import getlocale - return unicode(value, getlocale()[1]) def exception_to_unicode(e): - if hasattr(e, 'message'): + if (sys.version_info[:2] < (2,6)) and hasattr(e, 'message'): return ustr(e.message) if hasattr(e, 'args'): return "\n".join((ustr(a) for a in e.args)) try: return ustr(e) - except: - return u"Unknow message" + except Exception: + return u"Unknown message" # to be compatible with python 2.4 @@ -751,73 +913,118 @@ if not hasattr(__builtin__, 'all'): def all(iterable): for element in iterable: if not element: - return False + return False return True - + __builtin__.all = all del all - + if not hasattr(__builtin__, 'any'): def any(iterable): for element in iterable: if element: - return True + return True return False - + __builtin__.any = any del any - +def get_iso_codes(lang): + if lang.find('_') != -1: + if lang.split('_')[0] == lang.split('_')[1].lower(): + lang = lang.split('_')[0] + return lang def get_languages(): + # The codes below are those from Launchpad's Rosetta, with the exception + # of some trivial codes where the Launchpad code is xx and we have xx_XX. 
languages={ + 'ab_RU': u'Abkhazian / аҧсуа', 'ar_AR': u'Arabic / الْعَرَبيّة', - 'bg_BG': u'Bulgarian / български', + 'bg_BG': u'Bulgarian / български език', 'bs_BS': u'Bosnian / bosanski jezik', 'ca_ES': u'Catalan / Català', 'cs_CZ': u'Czech / Čeština', 'da_DK': u'Danish / Dansk', 'de_DE': u'German / Deutsch', - 'el_EL': u'Greek / Ελληνικά', + 'el_GR': u'Greek / Ελληνικά', 'en_CA': u'English (CA)', 'en_GB': u'English (UK)', 'en_US': u'English (US)', 'es_AR': u'Spanish (AR) / Español (AR)', + 'es_BO': u'Spanish (BO) / Español (BO)', + 'es_CL': u'Spanish (CL) / Español (CL)', + 'es_CO': u'Spanish (CO) / Español (CO)', + 'es_CR': u'Spanish (CR) / Español (CR)', + 'es_DO': u'Spanish (DO) / Español (DO)', + 'es_EC': u'Spanish (EC) / Español (EC)', 'es_ES': u'Spanish / Español', + 'es_GT': u'Spanish (GT) / Español (GT)', + 'es_HN': u'Spanish (HN) / Español (HN)', + 'es_MX': u'Spanish (MX) / Español (MX)', + 'es_NI': u'Spanish (NI) / Español (NI)', + 'es_PA': u'Spanish (PA) / Español (PA)', + 'es_PE': u'Spanish (PE) / Español (PE)', + 'es_PR': u'Spanish (PR) / Español (PR)', + 'es_PY': u'Spanish (PY) / Español (PY)', + 'es_SV': u'Spanish (SV) / Español (SV)', + 'es_UY': u'Spanish (UY) / Español (UY)', + 'es_VE': u'Spanish (VE) / Español (VE)', 'et_EE': u'Estonian / Eesti keel', - 'fi_FI': u'Finland / Suomi', + 'fa_IR': u'Persian / فارس', + 'fi_FI': u'Finnish / Suomi', 'fr_BE': u'French (BE) / Français (BE)', 'fr_CH': u'French (CH) / Français (CH)', 'fr_FR': u'French / Français', + 'gl_ES': u'Galician / Galego', + 'gu_IN': u'Gujarati / ગુજરાતી', + 'he_IL': u'Hebrew / עִבְרִי', + 'hi_IN': u'Hindi / हिंदी', 'hr_HR': u'Croatian / hrvatski jezik', 'hu_HU': u'Hungarian / Magyar', 'id_ID': u'Indonesian / Bahasa Indonesia', 'it_IT': u'Italian / Italiano', + 'iu_CA': u'Inuktitut / ᐃᓄᒃᑎᑐᑦ', + 'ja_JP': u'Japanese / 日本語', + 'ko_KP': u'Korean (KP) / 한국어 (KP)', + 'ko_KR': u'Korean (KR) / 한국어 (KR)', 'lt_LT': u'Lithuanian / Lietuvių kalba', + 'lv_LV': u'Latvian / latviešu valoda', + 'ml_IN': u'Malayalam / മലയാളം', + 'mn_MN': u'Mongolian / монгол', + 'nb_NO': u'Norwegian Bokmål / Norsk bokmål', 'nl_NL': u'Dutch / Nederlands', - 'nl_BE': u'Dutch (Belgium) / Nederlands (Belgïe)', + 'nl_BE': u'Flemish (BE) / Vlaams (BE)', + 'oc_FR': u'Occitan (FR, post 1500) / Occitan', 'pl_PL': u'Polish / Język polski', - 'pt_BR': u'Portugese (BR) / português (BR)', - 'pt_PT': u'Portugese / português', - 'ro_RO': u'Romanian / limba română', + 'pt_BR': u'Portugese (BR) / Português (BR)', + 'pt_PT': u'Portugese / Português', + 'ro_RO': u'Romanian / română', 'ru_RU': u'Russian / русский язык', - 'sl_SL': u'Slovenian / slovenščina', - 'sq_AL': u'Albanian / Shqipëri', + 'si_LK': u'Sinhalese / සිංහල', + 'sl_SI': u'Slovenian / slovenščina', + 'sk_SK': u'Slovak / Slovenský jazyk', + 'sq_AL': u'Albanian / Shqip', + 'sr_RS': u'Serbian (Cyrillic) / српски', + 'sr@latin': u'Serbian (Latin) / srpski', 'sv_SE': u'Swedish / svenska', + 'te_IN': u'Telugu / తెలుగు', 'tr_TR': u'Turkish / Türkçe', - 'vi_VN': u'Vietnam / Cộng hòa xã hội chủ nghĩa Việt Nam', - 'uk_UA': u'Ukrainian / украї́нська мо́ва', + 'vi_VN': u'Vietnamese / Tiếng Việt', + 'uk_UA': u'Ukrainian / українська', + 'ur_PK': u'Urdu / اردو', 'zh_CN': u'Chinese (CN) / 简体中文', + 'zh_HK': u'Chinese (HK)', 'zh_TW': u'Chinese (TW) / 正體字', 'th_TH': u'Thai / ภาษาไทย', + 'tlh_TLH': u'Klingon', } return languages def scan_languages(): - import glob - file_list = [os.path.splitext(os.path.basename(f))[0] for f in glob.glob(os.path.join(config['root_path'],'addons', 'base', 'i18n', '*.po'))] + # Now 
it will take all languages from get languages function without filter it with base module languages lang_dict = get_languages() - ret = [(lang, lang_dict.get(lang, lang)) for lang in file_list] + ret = [(lang, lang_dict.get(lang, lang)) for lang in list(lang_dict)] ret.sort(key=lambda k:k[1]) return ret @@ -826,14 +1033,15 @@ def get_user_companies(cr, user): def _get_company_children(cr, ids): if not ids: return [] - cr.execute('SELECT id FROM res_company WHERE parent_id = any(array[%s])' %(','.join([str(x) for x in ids]),)) - res=[x[0] for x in cr.fetchall()] + cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),)) + res = [x[0] for x in cr.fetchall()] res.extend(_get_company_children(cr, res)) return res - cr.execute('SELECT comp.id FROM res_company AS comp, res_users AS u WHERE u.id = %s AND comp.id = u.company_id' % (user,)) - compids=[cr.fetchone()[0]] - compids.extend(_get_company_children(cr, compids)) - return compids + cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,)) + user_comp = cr.fetchone()[0] + if not user_comp: + return [] + return [user_comp] + _get_company_children(cr, [user_comp]) def mod10r(number): """ @@ -868,10 +1076,9 @@ def human_size(sz): def logged(f): from tools.func import wraps - + @wraps(f) def wrapper(*args, **kwargs): - import netsvc from pprint import pformat vector = ['Call -> function: %r' % f] @@ -882,7 +1089,7 @@ def logged(f): timeb4 = time.time() res = f(*args, **kwargs) - + vector.append(' result: %s' % pformat(res)) vector.append(' time delta: %s' % (time.time() - timeb4)) netsvc.Logger().notifyChannel('logged', netsvc.LOG_DEBUG, '\n'.join(vector)) @@ -925,7 +1132,7 @@ def debug(what): >>> func_foo(42) This will output on the logger: - + [Wed Dec 25 00:00:00 2008] DEBUG:func_foo:baz = 42 [Wed Dec 25 00:00:00 2008] DEBUG:func_foo:qnx = (42, 42) @@ -933,9 +1140,9 @@ def debug(what): --log-level=debug """ - import netsvc + warnings.warn("The tools.debug() method is deprecated, please use logging.", + DeprecationWarning, stacklevel=2) from inspect import stack - import re from pprint import pformat st = stack()[1] param = re.split("debug *\((.+)\)", st[4][0].strip())[1].strip() @@ -943,10 +1150,10 @@ def debug(what): what = pformat(what) if param != what: what = "%s = %s" % (param, what) - netsvc.Logger().notifyChannel(st[3], netsvc.LOG_DEBUG, what) + logging.getLogger(st[3]).debug(what) -icons = map(lambda x: (x,x), ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD', +__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD', 'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER', 'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE', 'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO', @@ -972,12 +1179,23 @@ icons = map(lambda x: (x,x), ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_ 'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase', 'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner', 'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph', -]) +'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test', +'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+', +'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver', +'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl', 
+'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus', +'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar', +'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow', +'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward', +'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific', +'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete' +] + +def icons(*a, **kw): + global __icons_list + return [(x, x) for x in __icons_list ] def extract_zip_file(zip_file, outdirectory): - import zipfile - import os - zf = zipfile.ZipFile(zip_file, 'r') out = outdirectory for path in zf.namelist(): @@ -992,14 +1210,296 @@ def extract_zip_file(zip_file, outdirectory): fp.close() zf.close() +def detect_ip_addr(): + """Try a very crude method to figure out a valid external + IP or hostname for the current machine. Don't rely on this + for binding to an interface, but it could be used as basis + for constructing a remote URL to the server. + """ + def _detect_ip_addr(): + from array import array + from struct import pack, unpack + try: + import fcntl + except ImportError: + fcntl = None + + ip_addr = None + + if not fcntl: # not UNIX: + host = socket.gethostname() + ip_addr = socket.gethostbyname(host) + else: # UNIX: + # get all interfaces: + nbytes = 128 * 32 + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + names = array('B', '\0' * nbytes) + #print 'names: ', names + outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0] + namestr = names.tostring() + + # try 64 bit kernel: + for i in range(0, outbytes, 40): + name = namestr[i:i+16].split('\0', 1)[0] + if name != 'lo': + ip_addr = socket.inet_ntoa(namestr[i+20:i+24]) + break + + # try 32 bit kernel: + if ip_addr is None: + ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)]) + + for ifname in [iface for iface in ifaces if iface != 'lo']: + ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24]) + break + + return ip_addr or 'localhost' + + try: + ip_addr = _detect_ip_addr() + except Exception: + ip_addr = 'localhost' + return ip_addr + +# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT: +# The server side never does any timestamp calculation, always +# sends them in a naive (timezone agnostic) format supposed to be +# expressed within the server timezone, and expects the clients to +# provide timestamps in the server timezone as well. +# It stores all timestamps in the database in naive format as well, +# which also expresses the time in the server timezone. +# For this reason the server makes its timezone name available via the +# common/timezone_get() rpc method, which clients need to read +# to know the appropriate time offset to use when reading/writing +# times. +def get_win32_timezone(): + """Attempt to return the "standard name" of the current timezone on a win32 system. + @return: the standard name of the current win32 timezone, or False if it cannot be found. 
+ """ + res = False + if (sys.platform == "win32"): + try: + import _winreg + hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE) + current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS) + res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code + _winreg.CloseKey(current_tz_key) + _winreg.CloseKey(hklm) + except Exception: + pass + return res +def detect_server_timezone(): + """Attempt to detect the timezone to use on the server side. + Defaults to UTC if no working timezone can be found. + @return: the timezone identifier as expected by pytz.timezone. + """ + try: + import pytz + except Exception: + netsvc.Logger().notifyChannel("detect_server_timezone", netsvc.LOG_WARNING, + "Python pytz module is not available. Timezone will be set to UTC by default.") + return 'UTC' + + # Option 1: the configuration option (did not exist before, so no backwards compatibility issue) + # Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz + # Option 3: the environment variable TZ + sources = [ (config['timezone'], 'OpenERP configuration'), + (time.tzname[0], 'time.tzname'), + (os.environ.get('TZ',False),'TZ environment variable'), ] + # Option 4: OS-specific: /etc/timezone on Unix + if (os.path.exists("/etc/timezone")): + tz_value = False + try: + f = open("/etc/timezone") + tz_value = f.read(128).strip() + except Exception: + pass + finally: + f.close() + sources.append((tz_value,"/etc/timezone file")) + # Option 5: timezone info from registry on Win32 + if (sys.platform == "win32"): + # Timezone info is stored in windows registry. + # However this is not likely to work very well as the standard name + # of timezones in windows is rarely something that is known to pytz. + # But that's ok, it is always possible to use a config option to set + # it explicitly. + sources.append((get_win32_timezone(),"Windows Registry")) + + for (value,source) in sources: + if value: + try: + tz = pytz.timezone(value) + netsvc.Logger().notifyChannel("detect_server_timezone", netsvc.LOG_INFO, + "Using timezone %s obtained from %s." % (tz.zone,source)) + return value + except pytz.UnknownTimeZoneError: + netsvc.Logger().notifyChannel("detect_server_timezone", netsvc.LOG_WARNING, + "The timezone specified in %s (%s) is invalid, ignoring it." % (source,value)) + + netsvc.Logger().notifyChannel("detect_server_timezone", netsvc.LOG_WARNING, + "No valid timezone could be detected, using default UTC timezone. You can specify it explicitly with option 'timezone' in the server configuration.") + return 'UTC' + +def get_server_timezone(): + # timezone detection is safe in multithread, so lazy init is ok here + if (not config['timezone']): + config['timezone'] = detect_server_timezone() + return config['timezone'] + + +DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d" +DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S" +DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % ( + DEFAULT_SERVER_DATE_FORMAT, + DEFAULT_SERVER_TIME_FORMAT) + +# Python's strftime supports only the format directives +# that are available on the platform's libc, so in order to +# be cross-platform we map to the directives required by +# the C standard (1989 version), always available on platforms +# with a C standard implementation. 
+DATETIME_FORMATS_MAP = { + '%C': '', # century + '%D': '%m/%d/%Y', # modified %y->%Y + '%e': '%d', + '%E': '', # special modifier + '%F': '%Y-%m-%d', + '%g': '%Y', # modified %y->%Y + '%G': '%Y', + '%h': '%b', + '%k': '%H', + '%l': '%I', + '%n': '\n', + '%O': '', # special modifier + '%P': '%p', + '%R': '%H:%M', + '%r': '%I:%M:%S %p', + '%s': '', #num of seconds since epoch + '%T': '%H:%M:%S', + '%t': ' ', # tab + '%u': ' %w', + '%V': '%W', + '%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y + '%+': '%Y-%m-%d %H:%M:%S', + + # %Z is a special case that causes 2 problems at least: + # - the timezone names we use (in res_user.context_tz) come + # from pytz, but not all these names are recognized by + # strptime(), so we cannot convert in both directions + # when such a timezone is selected and %Z is in the format + # - %Z is replaced by an empty string in strftime() when + # there is not tzinfo in a datetime value (e.g when the user + # did not pick a context_tz). The resulting string does not + # parse back if the format requires %Z. + # As a consequence, we strip it completely from format strings. + # The user can always have a look at the context_tz in + # preferences to check the timezone. + '%z': '', + '%Z': '', +} + +def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name, + tz_offset=True, ignore_unparsable_time=True): + """ + Convert a source timestamp string into a destination timestamp string, attempting to apply the + correct offset if both the server and local timezone are recognized, or no + offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ). + + WARNING: This method is here to allow formatting dates correctly for inclusion in strings where + the client would not be able to format/offset it correctly. DO NOT use it for returning + date fields directly, these are supposed to be handled by the client!! + + @param src_tstamp_str: the str value containing the timestamp in the server timezone. + @param src_format: the format to use when parsing the server timestamp. + @param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone. + @param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context) + @param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed + using src_format or formatted using dst_format. + + @return: local/client formatted timestamp, expressed in the local/client timezone if possible + and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined. + """ + if not src_tstamp_str: + return False + res = src_tstamp_str + if src_format and dst_format: + # find out server timezone + server_tz = get_server_timezone() + try: + # dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!) + dt_value = datetime.strptime(src_tstamp_str, src_format) + if tz_offset and dst_tz_name: + try: + import pytz + src_tz = pytz.timezone(server_tz) + dst_tz = pytz.timezone(dst_tz_name) + src_dt = src_tz.localize(dt_value, is_dst=True) + dt_value = src_dt.astimezone(dst_tz) + except Exception: + pass + res = dt_value.strftime(dst_format) + except Exception: + # Normal ways to end up here are if strptime or strftime failed + if not ignore_unparsable_time: + return False + return res + + +def split_every(n, iterable, piece_maker=tuple): + """Splits an iterable into length-n pieces. 
The last piece will be shorter + if ``n`` does not evenly divide the iterable length. + @param ``piece_maker``: function to build the pieces + from the slices (tuple,list,...) + """ + iterator = iter(iterable) + piece = piece_maker(islice(iterator, n)) + while piece: + yield piece + piece = piece_maker(islice(iterator, n)) if __name__ == '__main__': import doctest doctest.testmod() +class upload_data_thread(threading.Thread): + def __init__(self, email, data, type): + self.args = [('email',email),('type',type),('data',data)] + super(upload_data_thread,self).__init__() + def run(self): + try: + import urllib + args = urllib.urlencode(self.args) + fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args) + fp.read() + fp.close() + except Exception: + pass + +def upload_data(email, data, type='SURVEY'): + a = upload_data_thread(email, data, type) + a.start() + return True + + +# port of python 2.6's attrgetter with support for dotted notation +def resolve_attr(obj, attr): + for name in attr.split("."): + obj = getattr(obj, name) + return obj + +def attrgetter(*items): + if len(items) == 1: + attr = items[0] + def g(obj): + return resolve_attr(obj, attr) + else: + def g(obj): + return tuple(resolve_attr(obj, attr) for attr in items) + return g # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
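
As a quick, self-contained illustration of the batching helper this patch introduces at the end of misc.py, the sketch below mirrors the new split_every() and shows one hypothetical use (cutting a list of record ids into fixed-size chunks); it is not part of the patch and assumes nothing beyond the Python 2 standard library.

    from itertools import islice

    def split_every(n, iterable, piece_maker=tuple):
        # yield successive pieces of length n; the last piece may be shorter
        iterator = iter(iterable)
        piece = piece_maker(islice(iterator, n))
        while piece:
            yield piece
            piece = piece_maker(islice(iterator, n))

    if __name__ == '__main__':
        # hypothetical usage: batch record ids in groups of three
        for chunk in split_every(3, range(8), piece_maker=list):
            print chunk    # prints [0, 1, 2], then [3, 4, 5], then [6, 7]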