# Matches a bare e-mail address.  re.VERBOSE lets us annotate each part;
# whitespace and #-comments inside the pattern are ignored by the engine.
email_re = re.compile(r"""
    ([a-zA-Z][\w\.-]*[a-zA-Z0-9]    # username: starts with a letter, ends with a letter or digit
    @                               # mandatory @ sign
    [a-zA-Z0-9][\w\.-]*             # domain: starts with a letter or digit
    \.
    [a-z]{2,3}                      # TLD, 2 or 3 lowercase letters
    )
    """, re.VERBOSE)
# Matches a "[123]" style numeric resource-id reference in a mail body.
res_re = re.compile(r"\[([0-9]+)\]", re.UNICODE)
# Matches "Set-<command>: <value>" control lines (command name case-insensitive).
# NOTE: flags are combined with `|`, the canonical way to OR flag bits
# (`+` happens to work for distinct flags but breaks if one is repeated).
command_re = re.compile(r"^Set-([a-z]+) *: *(.+)$", re.I | re.UNICODE)
# Matches Message-Id headers of the form "<...-openobject-<id>@<host>>",
# capturing the record id and the host part.
reference_re = re.compile(r"<.*-openobject-(\d+)@(.*)>", re.UNICODE)
+
# Human-readable labels for the five numeric mail priority levels,
# keyed by their string value ('1' = highest ... '5' = lowest).
priorities = dict(
    (str(level), '%d (%s)' % (level, label))
    for level, label in enumerate(('Highest', 'High', 'Normal', 'Low', 'Lowest'), 1)
)
+
def html2plaintext(html, body_id=None, encoding='utf-8'):
    ## (c) Fry-IT, www.fry-it.com, 2007
    ## <peter@fry-it.com>
    ## download here: http://www.peterbe.com/plog/html2plaintext
    """Convert an HTML document to plain text.

    Hyperlinks become "title [n]" footnote references with the URLs listed
    at the end of the text, simple inline markup (<b>, <em>, headings) is
    mapped to lightweight ASCII markers, and any remaining tags are
    stripped.

    :param html: the HTML source to convert
    :param body_id: if given, conversion starts at the tag with this id
                    instead of the <body> tag
    :param encoding: source encoding passed to BeautifulSoup
    :return: the plain-text rendering, or ``html`` unchanged when
             BeautifulSoup is not installed
    """
    try:
        # BeautifulSoup 3 API (parseOnlyThese / fromEncoding / findAll).
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    except ImportError:
        # Graceful degradation: without a parser we cannot convert, so
        # return the input untouched rather than failing the caller.
        return html

    urls = []
    if body_id is not None:
        strainer = SoupStrainer(id=body_id)
    else:
        strainer = SoupStrainer('body')

    soup = BeautifulSoup(html, parseOnlyThese=strainer, fromEncoding=encoding)
    # Collect every hyperlink: its visible title, its raw tag markup
    # (used later as the replacement anchor) and its target url.
    for link in soup.findAll('a'):
        title = link.renderContents()
        for url in [x[1] for x in link.attrs if x[0] == 'href']:
            urls.append(dict(url=url, tag=str(link), title=title))

    html = soup.__str__()

    url_index = []
    i = 0
    for d in urls:
        if d['title'] == d['url'] or 'http://' + d['title'] == d['url']:
            # The link text already shows its target: inline the bare url.
            html = html.replace(d['tag'], d['url'])
        else:
            # Otherwise replace the tag with a footnote-style "[n]" marker
            # and remember the url for the trailing index.
            i += 1
            html = html.replace(d['tag'], '%s [%s]' % (d['title'], i))
            url_index.append(d['url'])

    # Map simple inline markup onto lightweight ASCII markers.
    html = html.replace('<strong>', '*').replace('</strong>', '*')
    html = html.replace('<b>', '*').replace('</b>', '*')
    html = html.replace('<h3>', '*').replace('</h3>', '*')
    html = html.replace('<h2>', '**').replace('</h2>', '**')
    html = html.replace('<h1>', '**').replace('</h1>', '**')
    html = html.replace('<em>', '/').replace('</em>', '/')

    # The only line breaks we respect are those of ending tags and
    # explicit breaks; original newlines are collapsed first.
    html = html.replace('\n', ' ')
    html = html.replace('<br>', '\n')
    html = html.replace('<tr>', '\n')
    html = html.replace('</p>', '\n\n')
    html = re.sub(r'<br\s*/>', '\n', html)
    html = html.replace(' ' * 2, ' ')

    # For all other tags we failed to clean up, just remove them and
    # (optionally) complain about them on stderr.
    def desperate_fixer(g):
        #print >>sys.stderr, "failed to clean up %s" % str(g.group())
        return ' '

    html = re.sub(r'<.*?>', desperate_fixer, html)

    # lstrip all lines
    html = '\n'.join([x.lstrip() for x in html.splitlines()])

    # Append the footnote url index, separated by a blank line.
    for i, url in enumerate(url_index):
        if i == 0:
            html += '\n\n'
        html += '[%s] %s\n' % (i + 1, url)
    return html
+
+def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
+ attach=None, openobject_id=False, ssl=False, debug=False, subtype='plain', x_headers=None, priority='3'):
+
+ """Send an email.
+
+ Arguments:
+
+ `email_from`: A string used to fill the `From` header, if falsy,
+ config['email_from'] is used instead. Also used for
+ the `Reply-To` header if `reply_to` is not provided
+
+ `email_to`: a sequence of addresses to send the mail to.
+ """