1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
import os

import pooler
import tools
from tools import config
from tools.translate import _
from osv import osv, fields
class abstract_quality_check(object):
    """Abstract base class for all quality tests.

    Concrete tests live in the sub-directories of the base_module_quality
    addon (one directory per test, containing a module named after the
    directory); they are discovered and registered in ``self.tests`` by
    ``__init__`` and must override :meth:`run_test`.
    """

    def __init__(self):
        """Initialize the variables shared by every quality test and
        discover the concrete test classes shipped with this addon.

        NOTE(review): this constructor was reconstructed from the
        surviving attribute comments of a mangled copy — verify the
        default values against the upstream addon.
        """
        #This float have to store the rating of the module.
        #Used to compute the final score (average of all scores).
        self.score = 0.0

        #This char have to store the name of the test.
        self.name = ""

        #This char have to store the aim of the test and eventually a note.
        self.note = ""

        #This char have to store the result.
        #Used to display the result of the test.
        self.result = ""

        #This char have to store the result with more details.
        #Used to provide more details if necessary.
        self.result_details = ""

        # This boolean variable defines that if you do not want to calculate score and just only need detail
        # or summary report for some test then you will make it False.
        self.bool_count_score = True

        #This bool defines if the test can be run only if the module
        #is installed:
        #True => the module have to be installed.
        #False => the module can be uninstalled.
        self.bool_installed_only = True

        #This variable is used to give result of test more weight,
        #because some tests are more critical than others.
        self.ponderation = 1.0

        #Specify test got an error on module
        self.error = False

        #Specify the minimal score for the test (in percentage(%))
        self.min_score = 50

        #Specify whether test should be consider for Quality checking of the module
        self.active = True

        #This variable used to give message if test result is good or not
        self.message = ''

        #The tests have to subscribe itselfs in this list, that contains
        #all the test that have to be performed.
        self.tests = []
        self.list_folders = os.listdir(config['addons_path'] +
            '/base_module_quality/')
        for item in self.list_folders:
            path = config['addons_path'] + '/base_module_quality/' + item
            # A sub-directory is a test only when it ships a module named
            # after itself; report/wizard/security are support folders.
            if os.path.exists(path + '/' + item + '.py') and item not in ['report', 'wizard', 'security']:
                item2 = 'base_module_quality.' + item + '.' + item
                x_module = __import__(item2)
                x_file = getattr(x_module, item)
                x_obj = getattr(x_file, item)
                self.tests.append(x_obj)

    def run_test(self, cr, uid, module_path=""):
        """Run the test and fill the score, result and result_details
        variables.

        Must be overridden by every concrete test class.

        :raises osv.except_osv: always, in this base implementation.
        """
        raise osv.except_osv(_('Programming Error'), _('Test Is Not Implemented'))

    def get_objects(self, cr, uid, module):
        """Return the list of model names defined by the given module.

        :param module: technical name of the module (e.g. 'sale')
        :return: list of model-name strings (e.g. ['sale.order', ...])
        """
        pool = pooler.get_pool(cr.dbname)
        ids2 = pool.get('ir.model.data').search(cr, uid,
            [('module', '=', module), ('model', '=', 'ir.model')])
        model_list = []
        model_data = pool.get('ir.model.data').browse(cr, uid, ids2)
        for model in model_data:
            model_list.append(model.res_id)
        obj_list = []
        for mod in pool.get('ir.model').browse(cr, uid, model_list):
            obj_list.append(str(mod.model))
        return obj_list

    def get_model_ids(self, cr, uid, models=None):
        """Return ids of the ir.model records whose model name is in
        ``models``.

        :param models: list of model-name strings (default: empty list;
            the former mutable-default argument was replaced)
        """
        pool = pooler.get_pool(cr.dbname)
        return pool.get('ir.model').search(cr, uid, [('model', 'in', models or [])])

    def get_ids(self, cr, uid, object_list):
        """Return a dictionary {model name: list of record ids} for every
        model in ``object_list``.
        """
        pool = pooler.get_pool(cr.dbname)
        result_ids = {}
        for obj in object_list:
            ids = pool.get(obj).search(cr, uid, [])
            # defensive: drop None ids and tolerate a None search result
            ids = filter(lambda id: id != None, ids or [])
            result_ids[obj] = ids
        return result_ids

    def format_table(self, header=None, data_list=None): #This function can work for widget="text_wiki"
        """Render ``data_list`` as a text_wiki table.

        :param header: pair (table-header format string, list of column
            titles); the format string is %-filled with the titles
        :param data_list: mapping {row key: tuple of cell values}
        :return: wiki-markup string terminated by the table closer '|}'

        NOTE(review): the row template below was reconstructed from
        standard wiki table markup — verify against upstream.
        """
        header = header or []
        data_list = data_list or {}
        detail = ""
        detail += (header[0]) % tuple(header[1])
        # one row template: a wiki row separator plus one cell per column
        frow = '\n|-'
        for i in header[1]:
            frow += '\n| %s'
        for key, value in data_list.items():
            detail += (frow) % tuple(value)
        detail = detail + '\n|}'
        return detail

    def format_html_table(self, header=None, data_list=None): #This function can work for widget="html_tag"
        """Render ``data_list`` as an HTML table body.

        :param header: pair (header format string, list of column titles)
        :param data_list: mapping {row key: tuple of cell values}
            (the former default of ``[]`` could never work with the
            ``.items()`` call below, so the default is now a dict)
        :return: HTML string

        NOTE(review): the <tr> wrapper was reconstructed — only the
        per-cell '<td>%s</td>' template survives in this copy.
        """
        header = header or []
        data_list = data_list or {}
        detail = ""
        detail += (header[0]) % tuple(header[1])
        # one row template: one <td> per column, wrapped in a <tr>
        frow = '<tr>'
        for i in header[1]:
            frow += '<td>%s</td>'
        frow += '</tr>'
        for key, value in data_list.items():
            detail += (frow) % tuple(value)
        return detail

    def add_quatation(self, x_no, y_no):
        """Return the ratio of the two counters as a float.

        NOTE(review): the body of this method was lost in this copy;
        reconstructed as x_no / y_no — verify against upstream.
        """
        return float(x_no) / float(y_no)

    def get_style(self):
        # This function return style tag with specified styles for html pages
        # NOTE(review): the literal below was reconstructed around the
        # surviving CSS fragments — verify against upstream.
        style = '''
        <style>
            .divstyle {
                border:1px solid #aaaaaa;
                background-color:#f9f9f9;
                padding: 5px;
            }
            .tablestyle {
                border:1px dashed gray;
            }
            .tdatastyle {
                border:0.5px solid gray;
            }
            .headerstyle {
                padding-bottom: .17em;
                border-bottom: 1px solid #aaa;
            }
        </style>'''
        return style
class module_quality_check(osv.osv):
    """Transient-style model that runs every registered quality test
    against one module and aggregates the weighted scores.
    """
    _name = 'module.quality.check'
    _columns = {
        'name': fields.char('Rated Module', size=64, ),
        'final_score': fields.char('Final Score (%)', size=10,),
        'check_detail_ids': fields.one2many('module.quality.detail', 'quality_check_id', 'Tests',),
    }

    def check_quality(self, cr, uid, module_name, module_state=None):
        '''
        This function will calculate score of openerp module
        It will return data in below format:
        Format: {'final_score':'80.50', 'name': 'sale',
            'check_detail_ids':
                [(0,0,{'name':'workflow_test', 'score':'100', 'ponderation':'0', 'summary': text_wiki format data, 'detail': html format data, 'state':'done', 'note':'XXXX'}),
                ((0,0,{'name':'terp_test', 'score':'60', 'ponderation':'1', 'summary': text_wiki format data, 'detail': html format data, 'state':'done', 'note':'terp description'}),
                ...]}
        So here the detail result is in html format and summary will be in text_wiki format.
        '''
        pool = pooler.get_pool(cr.dbname)
        obj_module = pool.get('ir.module.module')
        # Look the state up only when the caller did not supply it.
        if not module_state:
            module_id = obj_module.search(cr, uid, [('name', '=', module_name)])
            if module_id:
                module_state = obj_module.browse(cr, uid, module_id[0]).state
        abstract_obj = abstract_quality_check()
        score_sum = 0.0
        ponderation_sum = 0.0
        create_ids = []
        for test in abstract_obj.tests:
            # 'base' lives under the server root, not the addons path.
            ad = tools.config['addons_path']
            if module_name == 'base':
                ad = tools.config['root_path'] + '/addons'
            module_path = os.path.join(ad, module_name)
            val = test.quality_test()
            # Inactive tests are excluded from quality checking entirely.
            if not val.active:
                continue
            if not val.bool_installed_only or module_state == "installed":
                val.run_test(cr, uid, str(module_path))
                if not val.error:
                    data = {
                        'name': val.name,
                        'score': val.score * 100,
                        'ponderation': val.ponderation,
                        'summary': val.result,
                        'detail': val.result_details,
                        'state': 'done',
                        'note': val.note,
                        'message': val.message,
                    }
                    # Some tests only produce a report and opt out of
                    # the final score (bool_count_score is False).
                    if val.bool_count_score:
                        score_sum += val.score * val.ponderation
                        ponderation_sum += val.ponderation
                else:
                    # NOTE(review): field set of this error branch was
                    # reconstructed — verify against upstream.
                    data = {
                        'name': val.name,
                        'score': 0,
                        'summary': val.result,
                        'state': 'skipped',
                        'note': val.note,
                    }
            else:
                # Test requires the module to be installed; skip it.
                data = {
                    'name': val.name,
                    'score': 0,
                    'note': val.note,
                    'state': 'skipped',
                    'summary': _("The module has to be installed before running this test.")
                }
            create_ids.append((0, 0, data))
        # Weighted average in percent; 0 when no test counted a score.
        final_score = ponderation_sum and '%.2f' % (score_sum / ponderation_sum * 100) or 0
        data = {
            'name': module_name,
            'final_score': final_score,
            'check_detail_ids': create_ids,
        }
        return data
module_quality_check()
class module_quality_detail(osv.osv):
    """One line of a quality check: the outcome of a single test run
    (linked back to its module.quality.check parent).
    """
    _name = 'module.quality.detail'
    _columns = {
        'quality_check_id': fields.many2one('module.quality.check', 'Quality'),
        'name': fields.char('Name', size=128),
        'score': fields.float('Score (%)'),
        'ponderation': fields.float('Ponderation', help='Some tests are more critical than others, so they have a bigger weight in the computation of final rating'),
        'note': fields.text('Note'),
        'summary': fields.text('Summary'),
        'detail': fields.text('Details'),
        'message': fields.char('Message', size=64),
        'state': fields.selection([('done','Done'),('skipped','Skipped'),], 'State', size=6, help='The test will be completed only if the module is installed or if the test may be processed on uninstalled module.'),
    }
module_quality_detail()
290 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: