# -*- coding: utf-8 -*-
-import fnmatch
+import hashlib
import inspect
import itertools
import logging
import math
+import mimetypes
import re
-import urllib
import urlparse
-import simplejson
import werkzeug
import werkzeug.exceptions
+import werkzeug.utils
import werkzeug.wrappers
# optional python-slugify import (https://github.com/un33k/python-slugify)
try:
import openerp
from openerp.osv import orm, osv, fields
from openerp.tools.safe_eval import safe_eval
-from openerp.addons.web.http import request, LazyResponse
+from openerp.addons.web.http import request
logger = logging.getLogger(__name__)
-def keep_query(*args, **kw):
- if not args and not kw:
- args = ('*',)
- params = kw.copy()
- query_params = frozenset(werkzeug.url_decode(request.httprequest.query_string).keys())
- for keep_param in args:
- for param in fnmatch.filter(query_params, keep_param):
- if param not in params and param in request.params:
- params[param] = request.params[param]
- return werkzeug.urls.url_encode(params)
-
def url_for(path_or_uri, lang=None):
    """Build a website URL for ``path_or_uri``, adjusting the leading
    language segment when the site serves several languages.

    NOTE(review): works on utf-8 encoded byte strings internally (Python 2)
    and always returns a unicode string.
    """
    if isinstance(path_or_uri, unicode):
        path_or_uri = path_or_uri.encode('utf-8')
    current_path = request.httprequest.path
    if isinstance(current_path, unicode):
        current_path = current_path.encode('utf-8')
    location = path_or_uri.strip()
    force_lang = lang is not None
    url = urlparse.urlparse(location)

    # Only relative URLs (no scheme, no host) are resolved against the
    # current request path; a bare query string is left untouched unless a
    # language was explicitly requested.
    if request and not url.netloc and not url.scheme and (url.path or force_lang):
        location = urlparse.urljoin(current_path, location)

    lang = lang or request.context.get('lang')
    langs = [lg[0] for lg in request.website.get_languages()]

    if (len(langs) > 1 or force_lang) and is_multilang_url(location, langs):
        segments = location.split('/')
        if segments[1] in langs:
            if force_lang:
                # An explicitly requested language wins over the one in the path.
                segments[1] = lang
            elif segments[1] == request.website.default_lang_code:
                # Drop the default-language prefix when it was not asked for.
                segments.pop(1)
        elif lang != request.website.default_lang_code or force_lang:
            # Prefix with the context language or the requested one.
            segments.insert(1, lang)
        location = '/'.join(segments)

    return location.decode('utf-8')
+
def is_multilang_url(local_url, langs=None):
    """Return True when ``local_url`` resolves to a routing endpoint flagged
    ``multilang``; False when it does not, or on any routing failure."""
    if not langs:
        langs = [lg[0] for lg in request.website.get_languages()]
    # Strip an existing language prefix so the bare route can be matched.
    segments = local_url.split('/')
    if segments[1] in langs:
        del segments[1]
        local_url = '/'.join(segments)
    try:
        # Try to match an endpoint in werkzeug's routing table.
        parts = local_url.split('?')
        path = parts[0]
        query_string = parts[1] if len(parts) > 1 else None
        router = request.httprequest.app.get_db_router(request.db).bind('')
        endpoint = router.match(path, query_args=query_string)[0]
        return endpoint.routing.get('multilang', False)
    except Exception:
        # Unknown routes, redirects, method mismatches… are not multilang.
        return False
def slugify(s, max_length=None):
    """Transform a string into a URL-friendly slug, truncated to
    ``max_length`` characters when given.
    """
    if slugify_lib:
        # Two different libraries expose a `slugify` callable; only
        # python-slugify accepts the max_length keyword, so fall back to the
        # naive implementation on TypeError.
        try:
            return slugify_lib.slugify(s, max_length=max_length)
        except TypeError:
            pass
    hyphenated = re.sub(r'\s+', '-', s)
    cleaned = re.sub(r'[^-_A-Za-z0-9]', '', hyphenated)
    return cleaned[:max_length]
else:
# assume name_search result tuple
id, name = value
- return "%s-%d" % (slugify(name), id)
+ slugname = slugify(name or '')
+ if not slugname:
+ return str(id)
+ return "%s-%d" % (slugname, id)
def urlplus(url, params):
    """Append ``params`` to ``url`` as a properly url-encoded query string
    (delegates the encoding to werkzeug.Href)."""
    href = werkzeug.Href(url)
    return href(params or None)
class website(osv.osv):
def _get_menu_website(self, cr, uid, ids, context=None):
menu = menus and menus[0] or False
return dict( map(lambda x: (x, menu), ids) )
- def _get_public_user(self, cr, uid, ids, name='public_user', arg=(), context=None):
- ref = self.get_public_user(cr, uid, context=context)
- return dict( map(lambda x: (x, ref), ids) )
-
_name = "website" # Avoid website.website convention for conciseness (for new api). Got a special authorization from xmo and rco
_description = "Website"
_columns = {
'social_youtube': fields.char('Youtube Account'),
'social_googleplus': fields.char('Google+ Account'),
'google_analytics_key': fields.char('Google Analytics Key'),
- 'public_user': fields.function(_get_public_user, relation='res.users', type='many2one', string='Public User'),
+ 'user_id': fields.many2one('res.users', string='Public User'),
+ 'partner_id': fields.related('user_id','partner_id', type='many2one', relation='res.partner', string='Public Partner'),
'menu_id': fields.function(_get_menu, relation='website.menu', type='many2one', string='Main Menu',
store= {
'website.menu': (_get_menu_website, ['sequence','parent_id','website_id'], 10)
})
}
+ _defaults = {
+ 'company_id': lambda self,cr,uid,c: self.pool['ir.model.data'].xmlid_to_res_id(cr, openerp.SUPERUSER_ID, 'base.public_user'),
+ }
+
# cf. Wizard hack in website_views.xml
def noop(self, *args, **kwargs):
    """Do nothing; placeholder endpoint for the wizard hack (cf. website_views.xml)."""
    pass
return '%s.%s' % (module, slugify(name, max_length=50))
def page_exists(self, cr, uid, ids, name, module='website', context=None):
    """Return the (model, res_id) reference of page ``module.name`` when the
    xml id exists, False otherwise.
    """
    try:
        return self.pool["ir.model.data"].get_object_reference(cr, uid, module, name)
    except Exception:
        # Fix: the previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; a missing xml id is the only expected failure.
        return False
- def get_public_user(self, cr, uid, context=None):
- uid = openerp.SUPERUSER_ID
- res = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'public_user')
- return res and res[1] or False
-
@openerp.tools.ormcache(skiparg=3)
def _get_languages(self, cr, uid, id, context=None):
website = self.browse(cr, uid, id)
# TODO: Select website, currently hard coded
return self.pool['website'].browse(cr, uid, 1, context=context)
- def preprocess_request(self, cr, uid, ids, request, context=None):
- # TODO FP: is_website_publisher and editable in context should be removed
- # for performance reasons (1 query per image to load) but also to be cleaner
- # I propose to replace this by a group 'base.group_website_publisher' on the
- # view that requires it.
- Access = request.registry['ir.model.access']
def is_publisher(self, cr, uid, ids, context=None):
    """Whether the current user may publish/edit website content, i.e. has
    write access on ir.ui.view."""
    Access = self.pool['ir.model.access']
    return Access.check(cr, uid, 'ir.ui.view', 'write', False, context)
def get_template(self, cr, uid, ids, template, context=None):
    """Return the ir.ui.view browse record for ``template``.

    ``template`` may be a view id (int/long) or an xml id; xml ids without a
    module part default to the `website` module.
    """
    if isinstance(template, (int, long)):
        view_id = template
    else:
        xmlid = template if '.' in template else 'website.%s' % template
        module, name = xmlid.split('.', 1)
        IMD = request.registry["ir.model.data"]
        model, view_id = IMD.get_object_reference(cr, uid, module, name)
    return self.pool["ir.ui.view"].browse(cr, uid, view_id, context=context)
def _render(self, cr, uid, ids, template, values=None, context=None):
    """Deprecated shim kept only for backward API compatibility (saas-3);
    delegates straight to ir.ui.view.render()."""
    View = self.pool['ir.ui.view']
    return View.render(cr, uid, template, values=values, context=context)
def render(self, cr, uid, ids, template, values=None, status_code=None, context=None):
    """Deprecated shim kept only for backward API compatibility (saas-3).

    NOTE(review): ``status_code`` is accepted but not forwarded to
    request.render — confirm whether callers still rely on it.
    """
    return request.render(template, values, uid=uid)
def pager(self, cr, uid, ids, url, total, page=1, step=30, scope=5, url_args=None, context=None):
# Compute Pager
pmin = pmax - scope if pmax - scope > 0 else 1
def get_url(page):
- _url = "%spage/%s/" % (url, page)
+ _url = "%s/page/%s" % (url, page) if page > 1 else url
if url_args:
_url = "%s?%s" % (_url, werkzeug.url_encode(url_args))
return _url
endpoint = rule.endpoint
methods = rule.methods or ['GET']
converters = rule._converters.values()
-
- return (
- 'GET' in methods
+ if not ('GET' in methods
and endpoint.routing['type'] == 'http'
and endpoint.routing['auth'] in ('none', 'public')
and endpoint.routing.get('website', False)
- # preclude combinatorial explosion by only allowing a single converter
- and len(converters) <= 1
- # ensure all converters on the rule are able to generate values for
- # themselves
and all(hasattr(converter, 'generate') for converter in converters)
- ) and self.endpoint_is_enumerable(rule)
-
- def endpoint_is_enumerable(self, rule):
- """ Verifies that it's possible to generate a valid url for the rule's
- endpoint
-
- :type rule: werkzeug.routing.Rule
- :rtype: bool
- """
- spec = inspect.getargspec(rule.endpoint.method)
-
- # if *args bail the fuck out, only dragons can live there
- if spec.varargs:
+ and endpoint.routing.get('website')):
return False
- # remove all arguments with a default value from the list
- defaults_count = len(spec.defaults or []) # spec.defaults can be None
- # a[:-0] ~ a[:0] ~ [] -> replace defaults_count == 0 by None to get
- # a[:None] ~ a
- args = spec.args[:(-defaults_count or None)]
+ # dont't list routes without argument having no default value or converter
+ spec = inspect.getargspec(endpoint.method.original_func)
+
+ # remove self and arguments having a default value
+ defaults_count = len(spec.defaults or [])
+ args = spec.args[1:(-defaults_count or None)]
- # params with defaults were removed, leftover allowed are:
- # * self (technically should be first-parameter-of-instance-method but whatever)
- # * any parameter mapping to a converter
- return all(
- (arg == 'self' or arg in rule._converters)
- for arg in args)
+ # check that all args have a converter
+ return all( (arg in rule._converters) for arg in args)
def enumerate_pages(self, cr, uid, ids, query_string=None, context=None):
    """Yield the pages reachable on the website/CMS; mostly used to build
    links and the sitemap.

    Each yielded page is a dict with at least a 'loc' key (the url);
    converter-generated keys starting with '__' are copied in without the
    prefix (e.g. metadata such as last modification dates).
    """
    router = request.httprequest.app.get_db_router(request.db)
    # Force enumeration to be performed as the website public user.
    uid = request.website.user_id.id
    # Fix: use a set for seen-url dedup instead of O(n) list membership.
    seen_urls = set()
    for rule in router.iter_rules():
        if not self.rule_is_enumerable(rule):
            continue
        converters = rule._converters or {}

        def has_domain(item):
            # Converters restricted by a domain are expanded last.
            return hasattr(item[1], 'domain') and item[1].domain != '[]'

        # Fix: replaced deprecated Python2-only `<>` operator and old-style
        # sort(cmp=...) with an equivalent (stable) key-based sort.
        convitems = sorted(converters.items(), key=has_domain)

        # Build the cartesian product of all converter-generated values.
        values = [{}]
        for name, converter in convitems:
            newval = []
            for val in values:
                for generated in converter.generate(request.cr, uid, query=query_string, args=val, context=context):
                    newval.append(val.copy())
                    # The converter reports its own value under 'loc';
                    # rebind it under the converter's rule argument name.
                    generated[name] = generated['loc']
                    del generated['loc']
                    newval[-1].update(generated)
            values = newval

        for value in values:
            domain_part, url = rule.build(value, append_unknown=False)
            page = {'loc': url}
            for key, val in value.items():
                if key.startswith('__'):
                    page[key[2:]] = val
            if url in ('/sitemap.xml',):
                continue
            if url in seen_urls:
                continue
            seen_urls.add(url)
            if query_string and not self.page_matches(cr, uid, page, query_string, context=context):
                continue
            yield page
_description = "Website Menu"
_columns = {
'name': fields.char('Menu', size=64, required=True, translate=True),
- 'url': fields.char('Url', required=True, translate=True),
+ 'url': fields.char('Url', translate=True),
'new_window': fields.boolean('New Window'),
'sequence': fields.integer('Sequence'),
# TODO: support multiwebsite once done for ir.ui.views
'parent_left': fields.integer('Parent Left', select=True),
'parent_right': fields.integer('Parent Right', select=True),
}
+
def __defaults_sequence(self, cr, uid, context):
    """Default a new menu's sequence to the currently highest sequence, so
    new menus are appended at the end."""
    last = self.search_read(cr, uid, [(1, "=", 1)], ["sequence"],
                            limit=1, order="sequence DESC", context=context)
    return last and last[0]["sequence"] or 0
+
_defaults = {
'url': '',
- 'sequence': 0,
+ 'sequence': __defaults_sequence,
'new_window': False,
}
_parent_store = True
def _website_url_get(self, cr, uid, ids, name, arg, context=None):
    """Functional getter: public URL of each attachment — its external url
    when set, otherwise the /website/image controller route."""
    result = {}
    for attach in self.browse(cr, uid, ids, context=context):
        result[attach.id] = attach.url or urlplus('/website/image', {
            'model': 'ir.attachment',
            'field': 'datas',
            'id': attach.id,
        })
    return result
def _datas_checksum(self, cr, uid, ids, name, arg, context=None):
    """Functional getter: sha1 checksum of the binary payload for
    deduplicatable view attachments (see _compute_checksum), else None."""
    records = self.read(cr, uid, ids, ['res_model', 'res_id', 'type', 'datas'],
                        context=context)
    return {record['id']: self._compute_checksum(record) for record in records}
+
def _compute_checksum(self, attachment_dict):
    """Return the sha1 hex digest of the attachment's ``datas``, or None
    when it is not eligible for deduplication (not a standalone ir.ui.view
    binary attachment, or empty)."""
    deduplicable = (
        attachment_dict.get('res_model') == 'ir.ui.view'
        and not attachment_dict.get('res_id')
        and not attachment_dict.get('url')
        and attachment_dict.get('type', 'binary') == 'binary'
        and attachment_dict.get('datas')
    )
    if deduplicable:
        return hashlib.sha1(attachment_dict['datas']).hexdigest()
    return None
+
def _datas_big(self, cr, uid, ids, name, arg, context=None):
    """Functional getter: large-size resized version of each attachment's
    image datas; False when unavailable or not an image."""
    result = dict.fromkeys(ids, False)
    # With bin_size the caller only wants sizes; skip the costly resize.
    if context and context.get('bin_size'):
        return result

    for record in self.browse(cr, uid, ids, context=context):
        if not record.datas:
            continue
        try:
            result[record.id] = openerp.tools.image_resize_image_big(record.datas)
        except IOError:
            # PIL.Image.open raises IOError on non-image / corrupt data.
            pass

    return result
+
_columns = {
- 'website_url': fields.function(_website_url_get, string="Attachment URL", type='char')
+ 'datas_checksum': fields.function(_datas_checksum, size=40,
+ string="Datas checksum", type='char', store=True, select=True),
+ 'website_url': fields.function(_website_url_get, string="Attachment URL", type='char'),
+ 'datas_big': fields.function (_datas_big, type='binary', store=True,
+ string="Resized file content"),
+ 'mimetype': fields.char('Mime Type', readonly=True),
}
def _add_mimetype_if_needed(self, values):
    """Fill values['mimetype'] from the attached file name, when present."""
    fname = values.get('datas_fname')
    if fname:
        guessed = mimetypes.guess_type(fname)[0]
        values['mimetype'] = guessed or 'application/octet-stream'
+
def create(self, cr, uid, values, context=None):
    """Deduplicate view-embedded binary attachments by checksum: return the
    id of an existing identical attachment instead of creating a new one;
    otherwise create the attachment with its mimetype filled in."""
    checksum = self._compute_checksum(values)
    if checksum:
        # limit=1: only the first match is ever used.
        match = self.search(cr, uid, [('datas_checksum', '=', checksum)],
                            limit=1, context=context)
        if match:
            return match[0]
    self._add_mimetype_if_needed(values)
    return super(ir_attachment, self).create(
        cr, uid, values, context=context)
+
def write(self, cr, uid, ids, values, context=None):
    """Keep the stored mimetype in sync when the file name changes."""
    self._add_mimetype_if_needed(values)
    return super(ir_attachment, self).write(cr, uid, ids, values, context=context)
+
def try_remove(self, cr, uid, ids, context=None):
    """Remove web-image attachments that no view (template) references.

    Returns a dict mapping each attachment that could NOT be removed to the
    list of views ({'id', 'name'} dicts) preventing its removal.
    """
    Views = self.pool['ir.ui.view']
    attachments_to_remove = []
    # views blocking removal of the attachment
    removal_blocked_by = {}

    for attachment in self.browse(cr, uid, ids, context=context):
        # in-document URLs are html-escaped, a straight search will not
        # find them
        url = werkzeug.utils.escape(attachment.website_url)
        # Fix: the search result previously shadowed the `ids` argument.
        view_ids = Views.search(cr, uid, ["|", ('arch', 'like', '"%s"' % url), ('arch', 'like', "'%s'" % url)], context=context)

        if view_ids:
            removal_blocked_by[attachment.id] = Views.read(
                cr, uid, view_ids, ['name'], context=context)
        else:
            attachments_to_remove.append(attachment.id)
    if attachments_to_remove:
        self.unlink(cr, uid, attachments_to_remove, context=context)
    return removal_blocked_by
+
class res_partner(osv.osv):
_inherit = "res.partner"
def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
partner = self.browse(cr, uid, ids[0], context=context)
params = {
- 'center': '%s, %s %s, %s' % (partner.street, partner.city, partner.zip, partner.country_id and partner.country_id.name_get()[0][1] or ''),
+ 'center': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
'size': "%sx%s" % (height, width),
'zoom': zoom,
'sensor': 'false',
def google_map_link(self, cr, uid, ids, zoom=8, context=None):
    """Return a Google Maps link querying the first partner's address.

    NOTE(review): the ``zoom`` argument is not used — the link hardcodes
    z=10; confirm whether callers expect it to be honoured.
    """
    partner = self.browse(cr, uid, ids[0], context=context)
    country = partner.country_id and partner.country_id.name_get()[0][1] or ''
    address = '%s, %s %s, %s' % (
        partner.street or '', partner.city or '', partner.zip or '', country)
    return urlplus('https://maps.google.com/maps', {'q': address, 'z': 10})
class res_company(osv.osv):
_inherit = "res.company"