##############################################################################
#
# OpenERP, Open Source Business Applications
-# Copyright (C) 2012 OpenERP S.A. (<http://openerp.com>).
+# Copyright (C) 2012-2013 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
import cgi
import logging
import lxml.html
+import lxml.html.clean as clean
+from lxml import etree
import openerp.pooler as pooler
-import operator
import random
import re
import socket
import threading
import time
+from email.utils import getaddresses
from openerp.loglevels import ustr
# HTML Sanitizer
#----------------------------------------------------------
+tags_to_kill = ["script", "head", "meta", "title", "link", "style", "frame", "iframe", "base", "object", "embed"]
+tags_to_remove = ['html', 'body', 'font']
+
+
def html_sanitize(src):
if not src:
return src
src = ustr(src, errors='replace')
- root = lxml.html.fromstring(u"<div>%s</div>" % src)
- result = handle_element(root)
- res = []
- for element in children(result[0]):
- if isinstance(element, basestring):
- res.append(element)
- else:
- element.tail = ""
- res.append(lxml.html.tostring(element))
- return ''.join(res)
-
-# FIXME: shouldn't this be a whitelist rather than a blacklist?!
-to_remove = set(["script", "head", "meta", "title", "link", "img"])
-to_unwrap = set(["html", "body"])
-
-javascript_regex = re.compile(r"^\s*javascript\s*:.*$", re.IGNORECASE)
-
-def handle_a(el, new):
- href = el.get("href", "#")
- if javascript_regex.search(href):
- href = "#"
- new.set("href", href)
-
-special = {
- "a": handle_a,
-}
-
-def handle_element(element):
- if isinstance(element, basestring):
- return [element]
- if element.tag in to_remove:
- return []
- if element.tag in to_unwrap:
- return reduce(operator.add, [handle_element(x) for x in children(element)])
- result = lxml.html.fromstring("<%s />" % element.tag)
- for c in children(element):
- append_to(handle_element(c), result)
- if element.tag in special:
- special[element.tag](element, result)
- return [result]
-
-def children(node):
- res = []
- if node.text is not None:
- res.append(node.text)
- for child_node in node.getchildren():
- res.append(child_node)
- if child_node.tail is not None:
- res.append(child_node.tail)
- return res
-def append_to(elements, dest_node):
- for element in elements:
- if isinstance(element, basestring):
- children = dest_node.getchildren()
- if len(children) == 0:
- dest_node.text = element
- else:
- children[-1].tail = element
- else:
- dest_node.append(element)
+ # html encode email tags
+ part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL)
+ src = part.sub(lambda m: cgi.escape(m.group(1)), src)
+
+ # some corner cases make the parser crash (such as <SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT> in test_mail)
+ try:
+ cleaner = clean.Cleaner(page_structure=True, style=False, safe_attrs_only=False, forms=False, kill_tags=tags_to_kill, remove_tags=tags_to_remove)
+ cleaned = cleaner.clean_html(src)
+ except TypeError:
+ # lxml.clean version < 2.3.1 does not have a kill_tags attribute
+ # to remove in 2014
+ cleaner = clean.Cleaner(page_structure=True, style=False, safe_attrs_only=False, forms=False, remove_tags=tags_to_kill + tags_to_remove)
+ cleaned = cleaner.clean_html(src)
+ except Exception, e:
+ if isinstance(e, etree.ParserError) and 'empty' in str(e):
+ return ""
+ _logger.warning('html_sanitize failed to parse %s', src)
+ cleaned = '<p>Impossible to parse</p>'
+
+ # MAKO compatibility: $, { and } inside quotes are escaped, preventing correct mako execution
+ cleaned = cleaned.replace('%24', '$')
+ cleaned = cleaned.replace('%7B', '{')
+ cleaned = cleaned.replace('%7D', '}')
+ cleaned = cleaned.replace('%20', ' ')
+ cleaned = cleaned.replace('%5B', '[')
+ cleaned = cleaned.replace('%5D', ']')
+
+ return cleaned
#----------------------------------------------------------
be present in the html string. This method therefore takes as input
html code coming from a sanitized source, like fields.html.
"""
- modified_html = ''
+ def _replace_matching_regex(regex, source, replace=''):
+ dest = ''
+ idx = 0
+ for item in re.finditer(regex, source):
+ dest += source[idx:item.start()] + replace
+ idx = item.end()
+ dest += source[idx:]
+ return dest
+
+ if not html or not isinstance(html, basestring):
+ return html
+
+ html = ustr(html)
+
+ # 0. remove encoding attribute inside tags
+ doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL)
+ html = doctype.sub(r"", html)
# 1. <br[ /]> -> \n, because otherwise the tree is obfuscated
- br_tags = re.compile(r'([<]\s*br\s*\/?[>])')
- idx = 0
- for item in re.finditer(br_tags, html):
- modified_html += html[idx:item.start()] + '__BR_TAG__'
- idx = item.end()
- modified_html += html[idx:]
- html = modified_html
+ br_tags = re.compile(r'([<]\s*[bB][rR]\s*\/?[>])')
+ html = _replace_matching_regex(br_tags, html, '__BR_TAG__')
# 2. form a tree, handle (currently ?) pure-text by enclosing them in a pre
root = lxml.html.fromstring(html)
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
+ # 2.5 remove quoted text in nodes
+ quote_tags = re.compile(r'(\n(>)+[^\n\r]*)')
+ for node in root.getiterator():
+ if not node.text:
+ continue
+ node.text = _replace_matching_regex(quote_tags, node.text)
+
# 3. remove blockquotes
quotes = [el for el in root.getiterator(tag='blockquote')]
for node in quotes:
+ # copy the node tail into parent text
+ if node.tail:
+ parent = node.getparent()
+ parent.text = (parent.text or '') + node.tail
+ # remove the node
node.getparent().remove(node)
# 4. strip signatures
# 6. Misc cleaning :
# - ClEditor seems to love using <div><br /><div> -> replace with <br />
- modified_html = ''
br_div_tags = re.compile(r'(<div>\s*<br\s*\/>\s*<\/div>)')
- idx = 0
- for item in re.finditer(br_div_tags, html):
- modified_html += html[idx:item.start()] + '<br />'
- idx = item.end()
- modified_html += html[idx:]
- html = modified_html
+ html = _replace_matching_regex(br_div_tags, html, '<br />')
return html
## download here: http://www.peterbe.com/plog/html2plaintext
html = ustr(html)
-
- from lxml.etree import tostring, fromstring, HTMLParser
- tree = fromstring(html, parser=HTMLParser())
+ tree = etree.fromstring(html, parser=etree.HTMLParser())
if body_id is not None:
source = tree.xpath('//*[@id=%s]' % (body_id,))
link.text = '%s [%s]' % (link.text, i)
url_index.append(url)
- html = ustr(tostring(tree, encoding=encoding))
+ html = ustr(etree.tostring(tree, encoding=encoding))
+ # \r char is converted into &#13;, must remove it
+ html = html.replace('&#13;', '')
html = html.replace('<strong>', '*').replace('</strong>', '*')
html = html.replace('<b>', '*').replace('</b>', '*')
return html
-def text2html(text, container_tag='div'):
+def plaintext2html(text, container_tag=False):
""" Convert plaintext into html. Content of the text is escaped to manage
html entities, using cgi.escape().
- all \n,\r are replaced by <br />
:param string container_tag: container of the html; by default the
content is embedded into a <div>
"""
- text = cgi.escape(text)
+ text = cgi.escape(ustr(text))
# 1. replace \n and \r
text = text.replace('\n', '<br/>')
# 4. container
if container_tag:
final = '<%s>%s</%s>' % (container_tag, final, container_tag)
- return final
+ return ustr(final)
+
+def append_content_to_html(html, content, plaintext=True, preserve=False, container_tag=False):
+ """ Append extra content at the end of an HTML snippet, trying
+ to locate the end of the HTML document (</body>, </html>, or
+ EOF), and converting the provided content in html unless ``plaintext``
+ is False.
+ Content conversion can be done in two ways:
+ - wrapping it into a pre (preserve=True)
+ - use plaintext2html (preserve=False, using container_tag to wrap the
+ whole content)
+ A side-effect of this method is to coerce all HTML tags to
+ lowercase in ``html``, and strip enclosing <html> or <body> tags in
+ content if ``plaintext`` is False.
+
+ :param str html: html tagsoup (doesn't have to be XHTML)
+ :param str content: extra content to append
+ :param bool plaintext: whether content is plaintext and should
+ be wrapped in a <pre/> tag.
+ :param bool preserve: if content is plaintext, wrap it into a <pre>
+ instead of converting it into html
+ """
+ html = ustr(html)
+ if plaintext and preserve:
+ content = u'\n<pre>%s</pre>\n' % ustr(content)
+ elif plaintext:
+ content = '\n%s\n' % plaintext2html(content, container_tag)
+ else:
+ content = re.sub(r'(?i)(</?html.*>|</?body.*>|<!\W*DOCTYPE.*>)', '', content)
+ content = u'\n%s\n' % ustr(content)
+ # Force all tags to lowercase
+ html = re.sub(r'(</?)\W*(\w+)([ >])',
+ lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
+ insert_location = html.find('</body>')
+ if insert_location == -1:
+ insert_location = html.find('</html>')
+ if insert_location == -1:
+ return '%s%s' % (html, content)
+ return '%s%s%s' % (html[:insert_location], content, html[insert_location:])
#----------------------------------------------------------
# Emails
"""
# If not cr, get cr from current thread database
+ local_cr = None
if not cr:
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
- cr = pooler.get_db_only(db_name).cursor()
+ local_cr = cr = pooler.get_db(db_name).cursor()
else:
raise Exception("No database cursor found, please pass one explicitly")
_logger.exception("tools.email_send failed to deliver email")
return False
finally:
- cr.close()
+ if local_cr:
+ cr.close()
return res
def email_split(text):
""" Return a list of the email addresses found in ``text`` """
if not text:
return []
- return re.findall(r'([^ ,<@]+@[^> ,]+)', text)
-
-def append_content_to_html(html, content, plaintext=True):
- """Append extra content at the end of an HTML snippet, trying
- to locate the end of the HTML document (</body>, </html>, or
- EOF), and wrapping the provided content in a <pre/> block
- unless ``plaintext`` is False. A side-effect of this
- method is to coerce all HTML tags to lowercase in ``html``,
- and strip enclosing <html> or <body> tags in content if
- ``plaintext`` is False.
-
- :param str html: html tagsoup (doesn't have to be XHTML)
- :param str content: extra content to append
- :param bool plaintext: whether content is plaintext and should
- be wrapped in a <pre/> tag.
- """
- html = ustr(html)
- if plaintext:
- content = u'\n<pre>%s</pre>\n' % ustr(content)
- else:
- content = re.sub(r'(?i)(</?html.*>|</?body.*>|<!\W*DOCTYPE.*>)', '', content)
- content = u'\n%s\n' % ustr(content)
- # Force all tags to lowercase
- html = re.sub(r'(</?)\W*(\w+)([ >])',
- lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
- insert_location = html.find('</body>')
- if insert_location == -1:
- insert_location = html.find('</html>')
- if insert_location == -1:
- return '%s%s' % (html, content)
- return '%s%s%s' % (html[:insert_location], content, html[insert_location:])
+ return [addr[1] for addr in getaddresses([text])
+ # getaddresses() returns '' when email parsing fails, and
+ # sometimes returns emails without at least '@'. The '@'
+ # is strictly required in RFC2822's `addr-spec`.
+ if addr[1]
+ if '@' in addr[1]]
\ No newline at end of file