##############################################################################
#
# OpenERP, Open Source Business Applications
-# Copyright (C) 2012-2013 OpenERP S.A. (<http://openerp.com>).
+# Copyright (C) 2012-TODAY OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
import socket
import threading
import time
+from email.utils import getaddresses
import openerp
from openerp.loglevels import ustr
tags_to_remove = ['html', 'body', 'font']
# allow new semantic HTML5 tags
-allowed_tags = clean.defs.tags | frozenset('article section header footer hgroup nav aside figure'.split())
-safe_attrs = clean.defs.safe_attrs | frozenset(['style'])
+allowed_tags = clean.defs.tags | frozenset('article section header footer hgroup nav aside figure main'.split() + [etree.Comment])
+safe_attrs = clean.defs.safe_attrs | frozenset(
+ ['style',
+ 'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translate', 'data-oe-nodeid',
+ 'data-publish', 'data-id', 'data-res_id', 'data-member_id', 'data-view-id'
+ ])
-def html_sanitize(src, silent=True):
+
+def html_sanitize(src, silent=True, strict=False):
if not src:
return src
src = ustr(src, errors='replace')
# html encode email tags
part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL)
src = part.sub(lambda m: cgi.escape(m.group(1)), src)
+ # html encode mako tags <% ... %> to decode them later and keep them alive, otherwise they are stripped by the cleaner
+ src = src.replace('<%', cgi.escape('<%'))
+ src = src.replace('%>', cgi.escape('%>'))
kwargs = {
'page_structure': True,
'forms': True, # remove form tags
'remove_unknown_tags': False,
'allow_tags': allowed_tags,
+ 'comments': False,
+ 'processing_instructions': False
}
if etree.LXML_VERSION >= (2, 3, 1):
# kill_tags attribute has been added in version 2.3.1
else:
kwargs['remove_tags'] = tags_to_kill + tags_to_remove
- if etree.LXML_VERSION >= (3, 1, 0):
- kwargs.update({
- 'safe_attrs_only': True,
- 'safe_attrs': safe_attrs,
- })
+ if strict:
+ if etree.LXML_VERSION >= (3, 1, 0):
+ # lxml < 3.1.0 does not allow to specify safe_attrs. We keep all attributes in order to keep "style"
+ kwargs.update({
+ 'safe_attrs_only': True,
+ 'safe_attrs': safe_attrs,
+ })
else:
- # lxml < 3.1.0 does not allow to specify safe_attrs. We keep all attributes in order to keep "style"
- kwargs['safe_attrs_only'] = False
+ kwargs['safe_attrs_only'] = False # keep oe-data attributes + style
+ kwargs['frames'] = False # do not remove frames (embedded video in CMS blogs)
try:
# some corner cases make the parser crash (such as <SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT> in test_mail)
cleaner = clean.Cleaner(**kwargs)
cleaned = cleaner.clean_html(src)
- except etree.ParserError:
+ # MAKO compatibility: $, { and } inside quotes are escaped, preventing correct mako execution
+ cleaned = cleaned.replace('%24', '$')
+ cleaned = cleaned.replace('%7B', '{')
+ cleaned = cleaned.replace('%7D', '}')
+ cleaned = cleaned.replace('%20', ' ')
+ cleaned = cleaned.replace('%5B', '[')
+ cleaned = cleaned.replace('%5D', ']')
+ cleaned = cleaned.replace('&lt;%', '<%')
+ cleaned = cleaned.replace('%&gt;', '%>')
+ except etree.ParserError, e:
+ if 'empty' in str(e):
+ return ""
if not silent:
raise
logger.warning('ParserError obtained when sanitizing %r', src, exc_info=True)
raise
logger.warning('unknown error obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>Unknown error when sanitizing</p>'
+
+ # this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
+ if cleaned.startswith('<div>') and cleaned.endswith('</div>'):
+ cleaned = cleaned[5:-6]
+
return cleaned
# HTML Cleaner
#----------------------------------------------------------
-def html_email_clean(html, remove=False, shorten=False, max_length=300):
+def html_email_clean(html, remove=False, shorten=False, max_length=300, expand_options=None,
+ protect_sections=False):
""" html_email_clean: clean the html by doing the following steps:
- try to strip email quotes, by removing blockquotes or having some client-
be flagged as to remove
:param int max_length: if shortening, maximum number of characters before
shortening
+ :param dict expand_options: options for the read more link when shortening
+ the content. The used keys are the following:
+
+ - oe_expand_container_tag: class applied to the
+ container of the whole read more link
+ - oe_expand_container_class: class applied to the
+ link container (default: oe_mail_expand)
+ - oe_expand_container_content: content of the
+ container (default: ...)
+ - oe_expand_separator_node: optional separator, like
+ adding ... <br /><br /> <a ...>read more</a> (default: void)
+ - oe_expand_a_href: href of the read more link itself
+ (default: #)
+ - oe_expand_a_class: class applied to the <a> containing
+ the link itself (default: oe_mail_expand)
+ - oe_expand_a_content: content of the <a> (default: read more)
+
+ The formatted read more link is the following:
+ <cont_tag class="oe_expand_container_class">
+ oe_expand_container_content
+ if expand_options.get('oe_expand_separator_node'):
+ <oe_expand_separator_node/>
+ <a href="oe_expand_a_href" class="oe_expand_a_class">
+ oe_expand_a_content
+ </a>
+ </span>
"""
def _replace_matching_regex(regex, source, replace=''):
""" Replace all matching expressions in source by replace """
iteration += 1
new_node = _insert_new_node(node, -1, new_node_tag, text[idx:] + (cur_node.tail or ''), None, {})
- def _truncate_node(node, position, find_first_blank=True):
+ def _truncate_node(node, position, simplify_whitespaces=True):
+ """ Truncate a node text at a given position. This algorithm will shorten
+ at the end of the word whose ending character exceeds position.
+
+ :param bool simplify_whitespaces: whether to try to count all successive
+ whitespaces as one character. This
+ option should not be True when trying
+ to keep 'pre' consistency.
+ """
if node.text is None:
node.text = ''
- # truncate text
- end_position = position if len(node.text) >= position else len(node.text)
- innertext = node.text[0:end_position]
- outertext = node.text[end_position:]
- if find_first_blank:
- stop_idx = outertext.find(' ')
- if stop_idx == -1:
- stop_idx = len(outertext)
+
+ truncate_idx = -1
+ if simplify_whitespaces:
+ cur_char_nbr = 0
+ word = None
+ node_words = node.text.strip(' \t\r\n').split()
+ for word in node_words:
+ cur_char_nbr += len(word)
+ if cur_char_nbr >= position:
+ break
+ if word:
+ truncate_idx = node.text.find(word) + len(word)
else:
- stop_idx = 0
- node.text = innertext + outertext[0:stop_idx]
+ truncate_idx = position
+ if truncate_idx == -1 or truncate_idx > len(node.text):
+ truncate_idx = len(node.text)
+
+ # compose new text bits
+ innertext = node.text[0:truncate_idx]
+ outertext = node.text[truncate_idx:]
+ node.text = innertext
+
# create <span> ... <a href="#">read more</a></span> node
- read_more_node = _create_node('span', ' ... ', None, {'class': 'oe_mail_expand'})
- read_more_link_node = _create_node('a', 'read more', None, {'href': '#', 'class': 'oe_mail_expand'})
+ read_more_node = _create_node(
+ expand_options.get('oe_expand_container_tag', 'span'),
+ expand_options.get('oe_expand_container_content', ' ... '),
+ None,
+ {'class': expand_options.get('oe_expand_container_class', 'oe_mail_expand')}
+ )
+ if expand_options.get('oe_expand_separator_node'):
+ read_more_separator_node = _create_node(
+ expand_options.get('oe_expand_separator_node'),
+ '',
+ None,
+ {}
+ )
+ read_more_node.append(read_more_separator_node)
+ read_more_link_node = _create_node(
+ 'a',
+ expand_options.get('oe_expand_a_content', 'read more'),
+ None,
+ {
+ 'href': expand_options.get('oe_expand_a_href', '#'),
+ 'class': expand_options.get('oe_expand_a_class', 'oe_mail_expand'),
+ }
+ )
read_more_node.append(read_more_link_node)
# create outertext node
- overtext_node = _create_node('span', outertext[stop_idx:])
+ overtext_node = _create_node('span', outertext)
# tag node
overtext_node.set('in_overlength', '1')
# add newly created nodes in dom
node.append(read_more_node)
node.append(overtext_node)
+ if expand_options is None:
+ expand_options = {}
+
if not html or not isinstance(html, basestring):
return html
html = ustr(html)
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
- # remove all tails and replace them by a span element, because managing text and tails can be a pain in the ass
- for node in root.getiterator():
+ quote_tags = re.compile(r'(\n(>)+[^\n\r]*)')
+ signature = re.compile(r'([-]{2,}[\s]?[\r\n]{1,2}[\s\S]+)')
+ for node in root.iter():
+ # remove all tails and replace them by a span element, because managing text and tails can be a pain in the ass
if node.tail:
tail_node = _create_node('span', node.tail)
node.tail = None
node.addnext(tail_node)
- # form node and tag text-based quotes and signature
- quote_tags = re.compile(r'(\n(>)+[^\n\r]*)')
- signature = re.compile(r'([-]{2,}[\s]?[\r\n]{1,2}[\s\S]+)')
- for node in root.getiterator():
+ # form node and tag text-based quotes and signature
_tag_matching_regex_in_text(quote_tags, node, 'span', {'text_quote': '1'})
_tag_matching_regex_in_text(signature, node, 'span', {'text_signature': '1'})
# signature_begin = False # try dynamic signature recognition
quote_begin = False
overlength = False
+ overlength_section_id = None
+ overlength_section_count = 0
cur_char_nbr = 0
- for node in root.getiterator():
+ for node in root.iter():
+ # comments do not need processing
+ # note: bug in node.get(value, default) for HtmlComments, default never returned
+ if node.tag == etree.Comment:
+ continue
+ # do not take into account multiple spaces that are displayed as max 1 space in html
+ node_text = ' '.join((node.text and node.text.strip(' \t\r\n') or '').split())
+
# root: try to tag the client used to write the html
if 'WordSection1' in node.get('class', '') or 'MsoNormal' in node.get('class', ''):
root.set('msoffice', '1')
if 'SkyDrivePlaceholder' in node.get('class', '') or 'SkyDrivePlaceholder' in node.get('id', ''):
root.set('hotmail', '1')
+ # protect sections by tagging section limits and blocks contained inside sections, using an increasing id to re-find them later
+ if node.tag == 'section':
+ overlength_section_count += 1
+ node.set('section_closure', str(overlength_section_count))
+ if node.getparent() is not None and (node.getparent().get('section_closure') or node.getparent().get('section_inner')):
+ node.set('section_inner', str(overlength_section_count))
+
# state of the parsing: flag quotes and tails to remove
if quote_begin:
node.set('in_quote', '1')
node.set('tail_remove', '1')
- # state of the parsing: flag when being in over-length content
+ # state of the parsing: flag when being in over-length content, depending on section content if defined (only when having protect_sections)
if overlength:
- node.set('in_overlength', '1')
- node.set('tail_remove', '1')
+ if not overlength_section_id or int(node.get('section_inner', overlength_section_count + 1)) > overlength_section_count:
+ node.set('in_overlength', '1')
+ node.set('tail_remove', '1')
# find quote in msoffice / hotmail / blockquote / text quote and signatures
if root.get('msoffice') and node.tag == 'div' and 'border-top:solid' in node.get('style', ''):
node.set('in_quote', '1')
node.set('tail_remove', '1')
if node.tag == 'blockquote' or node.get('text_quote') or node.get('text_signature'):
+ # here no quote_begin because we want to be able to remove some quoted
+ # text without removing all the remaining context
+ node.set('in_quote', '1')
+ if node.getparent() is not None and node.getparent().get('in_quote'):
+ # inside a block of removed text but not in quote_begin (see above)
node.set('in_quote', '1')
# shorten:
- # 1/ truncate the text at the next available space
- # 2/ create a 'read more' node, next to current node
- # 3/ add the truncated text in a new node, next to 'read more' node
- if shorten and not overlength and cur_char_nbr + len(node.text or '') > max_length:
+ # if protect section:
+ # 1/ find the first parent not being inside a section
+ # 2/ add the read more link
+ # else:
+ # 1/ truncate the text at the next available space
+ # 2/ create a 'read more' node, next to current node
+ # 3/ add the truncated text in a new node, next to 'read more' node
+ node_text = (node.text or '').strip().strip('\n').strip()
+ if shorten and not overlength and cur_char_nbr + len(node_text) > max_length:
node_to_truncate = node
- while node_to_truncate.get('in_quote') and node_to_truncate.getparent() is not None:
- node_to_truncate = node_to_truncate.getparent()
+ while node_to_truncate.getparent() is not None:
+ if node_to_truncate.get('in_quote'):
+ node_to_truncate = node_to_truncate.getparent()
+ elif protect_sections and (node_to_truncate.getparent().get('section_inner') or node_to_truncate.getparent().get('section_closure')):
+ node_to_truncate = node_to_truncate.getparent()
+ overlength_section_id = node_to_truncate.get('section_closure')
+ else:
+ break
+
overlength = True
node_to_truncate.set('truncate', '1')
- node_to_truncate.set('truncate_position', str(max_length - cur_char_nbr))
- cur_char_nbr += len(node.text or '')
+ if node_to_truncate == node:
+ node_to_truncate.set('truncate_position', str(max_length - cur_char_nbr))
+ else:
+ node_to_truncate.set('truncate_position', str(len(node.text or '')))
+ cur_char_nbr += len(node_text)
# Tree modification
# ------------------------------------------------------------
for node in root.iter():
if node.get('truncate'):
- _truncate_node(node, int(node.get('truncate_position', '0')))
+ _truncate_node(node, int(node.get('truncate_position', '0')), node.tag != 'pre')
# Post processing
# ------------------------------------------------------------
to_remove = []
- for node in root.getiterator():
+ for node in root.iter():
if node.get('in_quote') or node.get('in_overlength'):
# copy the node tail into parent text
if node.tail and not node.get('tail_remove'):
to_remove.append(node)
if node.get('tail_remove'):
node.tail = ''
+ # clean node
+ for attribute_name in ['in_quote', 'tail_remove', 'in_overlength', 'msoffice', 'hotmail', 'truncate', 'truncate_position']:
+ node.attrib.pop(attribute_name, None)
for node in to_remove:
if remove:
node.getparent().remove(node)
else:
- if not 'oe_mail_expand' in node.get('class', ''): # trick: read more link should be displayed even if it's in overlength
- node_class = node.get('class', '') + ' ' + 'oe_mail_cleaned'
+ if not expand_options.get('oe_expand_a_class', 'oe_mail_expand') in node.get('class', ''): # trick: read more link should be displayed even if it's in overlength
+ node_class = node.get('class', '') + ' oe_mail_cleaned'
node.set('class', node_class)
# html: \n that were tail of elements have been encapsulated into <span> -> back to \n
html = etree.tostring(root, pretty_print=False)
- linebreaks = re.compile(r'<span>([\s]*[\r\n]+[\s]*)<\/span>', re.IGNORECASE | re.DOTALL)
+ linebreaks = re.compile(r'<span[^>]*>([\s]*[\r\n]+[\s]*)<\/span>', re.IGNORECASE | re.DOTALL)
html = _replace_matching_regex(linebreaks, html, '\n')
return html
html = re.sub('<br\s*/?>', '\n', html)
html = re.sub('<.*?>', ' ', html)
html = html.replace(' ' * 2, ' ')
+ html = html.replace('&gt;', '>')
+ html = html.replace('&lt;', '<')
+ html = html.replace('&amp;', '&')
# strip all lines
html = '\n'.join([x.strip() for x in html.splitlines()])
elif plaintext:
content = '\n%s\n' % plaintext2html(content, container_tag)
else:
- content = re.sub(r'(?i)(</?html.*>|</?body.*>|<!\W*DOCTYPE.*>)', '', content)
+ content = re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', content)
content = u'\n%s\n' % ustr(content)
# Force all tags to lowercase
html = re.sub(r'(</?)\W*(\w+)([ >])',
# Updated in 7.0 to match the model name as well
# Typical form of references is <timestamp-openerp-record_id-model_name@domain>
# group(1) = the record ID ; group(2) = the model (if any) ; group(3) = the domain
-reference_re = re.compile("<.*-open(?:object|erp)-(\\d+)(?:-([\w.]+))?.*@(.*)>", re.UNICODE)
+reference_re = re.compile("<.*-open(?:object|erp)-(\\d+)(?:-([\w.]+))?[^>]*@([^>]*)>", re.UNICODE)
-# Bounce regex
-# Typical form of bounce is bounce-128-crm.lead-34@domain
-# group(1) = the mail ID; group(2) = the model (if any); group(3) = the record ID
-bounce_re = re.compile("[\w]+-(\d+)-?([\w.]+)?-?(\d+)?", re.UNICODE)
def generate_tracking_message_id(res_id):
"""Returns a string that can be used in the Message-ID RFC822 header field
if not cr:
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
- local_cr = cr = openerp.registry(db_name).db.cursor()
+ local_cr = cr = openerp.registry(db_name).cursor()
else:
raise Exception("No database cursor found, please pass one explicitly")
""" Return a list of the email addresses found in ``text`` """
if not text:
return []
- return re.findall(r'([^ ,<@]+@[^> ,]+)', text)
+ return [addr[1] for addr in getaddresses([text])
+ # getaddresses() returns '' when email parsing fails, and
+ # sometimes returns emails without at least '@'. The '@'
+ # is strictly required in RFC2822's `addr-spec`.
+ if addr[1]
+ if '@' in addr[1]]