Skip to content

Commit

Permalink
[REF] survey: on survey.user_input, rename some fields to ease unders…
Browse files Browse the repository at this point in the history
…tanding

PURPOSE

As new features are about to land in survey, notably live interactions [1]
and new survey building [2], performing a pre-cleaning is necessary. In this
PR we clean the survey models by removing unnecessary fields, cleaning some
code and finally renaming fields.

SPECIFICATIONS: RENAME QUIZ_SCORE ON SURVEY.USER_INPUT

On survey.user_input, rename the quiz_score field to scoring_percentage

``quiz_score`` field name is related to the old "quiz" behavior of surveys
that is replaced by certifications and scoring mechanisms. Let us propagate
the renaming, beginning with a ``scoring_`` prefix.

SPECIFICATIONS: RENAME QUIZZ_PASSED ON SURVEY.USER_INPUT

on survey.user_input, rename quizz_passed field to scoring_success

``quizz_passed`` field name is related to the old "quiz" behavior of surveys
that is replaced by certifications and scoring mechanisms. Let us propagate
the renaming, beginning with a ``scoring_`` prefix.

SPECIFICATIONS: RENAME TOKEN ON SURVEY.USER_INPUT

on survey.user_input, rename token field to access_token

The survey user input model holds two token fields. One is used to distinguish
a pool of attempts linked to a given invite: ``invite_token``. The other
one is used to control access to a specific user input. It means that
``invite_token`` indicates a set of user inputs and each of them is accessed
through its own ``token``. To be consistent with other naming in Odoo, this
latter field is renamed to ``access_token``.

LINKS

[0] Related to Task ID 2061901 (survey models cleaning and preparation)
[1] Task ID 1972640 (live interactions)
[2] Task ID 2119587 (new frontend for building surveys)

PR #40765
  • Loading branch information
tde-banana-odoo committed Dec 5, 2019
1 parent 1e66894 commit 755ac5f
Show file tree
Hide file tree
Showing 20 changed files with 104 additions and 104 deletions.
4 changes: 2 additions & 2 deletions addons/hr_recruitment_survey/tests/test_recruitment_survey.py
Expand Up @@ -31,7 +31,7 @@ def test_start_survey(self):
action_start = self.job_sysadmin.action_start_survey()
self.assertEqual(action_start['type'], 'ir.actions.act_url')
self.assertNotEqual(self.job_sysadmin.response_id.id, False)
self.assertIn(self.job_sysadmin.response_id.token, action_start['url'])
self.assertIn(self.job_sysadmin.response_id.access_token, action_start['url'])
action_start_with_response = self.job_sysadmin.action_start_survey()
self.assertEqual(action_start_with_response, action_start)

Expand All @@ -42,4 +42,4 @@ def test_print_survey(self):
self.assertEqual(action_print['type'], 'ir.actions.act_url')
self.job_sysadmin.response_id = self.env['survey.user_input'].create({'survey_id': self.survey_sysadmin.id})
action_print_with_response = self.job_sysadmin.action_print_survey()
self.assertIn(self.job_sysadmin.response_id.token, action_print_with_response['url'])
self.assertIn(self.job_sysadmin.response_id.access_token, action_print_with_response['url'])
2 changes: 1 addition & 1 deletion addons/hr_skills_survey/models/survey_user.py
Expand Up @@ -16,7 +16,7 @@ def _mark_done(self):

super(SurveyUserInput, self)._mark_done()

certification_user_inputs = self.filtered(lambda user_input: user_input.survey_id.certification and user_input.quizz_passed)
certification_user_inputs = self.filtered(lambda user_input: user_input.survey_id.certification and user_input.scoring_success)
partner_has_completed = {user_input.partner_id.id: user_input.survey_id for user_input in certification_user_inputs}
employees = self.env['hr.employee'].sudo().search([('user_id.partner_id', 'in', certification_user_inputs.mapped('partner_id').ids)])
for employee in employees:
Expand Down
44 changes: 22 additions & 22 deletions addons/survey/controllers/main.py
Expand Up @@ -34,7 +34,7 @@ def _fetch_from_access_token(self, survey_token, answer_token):
else:
answer_sudo = request.env['survey.user_input'].sudo().search([
('survey_id', '=', survey_sudo.id),
('token', '=', answer_token)
('access_token', '=', answer_token)
], limit=1)
return survey_sudo, answer_sudo

Expand Down Expand Up @@ -127,20 +127,20 @@ def _redirect_with_error(self, access_data, error_key):
return request.render("survey.survey_void_content", {'survey': survey_sudo, 'answer': answer_sudo})
elif error_key == 'survey_closed' and access_data['can_answer']:
return request.render("survey.survey_closed_expired", {'survey': survey_sudo})
elif error_key == 'survey_auth' and answer_sudo.token:
elif error_key == 'survey_auth' and answer_sudo.access_token:
if answer_sudo.partner_id and (answer_sudo.partner_id.user_ids or survey_sudo.users_can_signup):
if answer_sudo.partner_id.user_ids:
answer_sudo.partner_id.signup_cancel()
else:
answer_sudo.partner_id.signup_prepare(expiration=fields.Datetime.now() + relativedelta(days=1))
redirect_url = answer_sudo.partner_id._get_signup_url_for_action(url='/survey/start/%s?answer_token=%s' % (survey_sudo.access_token, answer_sudo.token))[answer_sudo.partner_id.id]
redirect_url = answer_sudo.partner_id._get_signup_url_for_action(url='/survey/start/%s?answer_token=%s' % (survey_sudo.access_token, answer_sudo.access_token))[answer_sudo.partner_id.id]
else:
redirect_url = '/web/login?redirect=%s' % ('/survey/start/%s?answer_token=%s' % (survey_sudo.access_token, answer_sudo.token))
redirect_url = '/web/login?redirect=%s' % ('/survey/start/%s?answer_token=%s' % (survey_sudo.access_token, answer_sudo.access_token))
return request.render("survey.survey_auth_required", {'survey': survey_sudo, 'redirect_url': redirect_url})
elif error_key == 'answer_deadline' and answer_sudo.token:
elif error_key == 'answer_deadline' and answer_sudo.access_token:
return request.render("survey.survey_closed_expired", {'survey': survey_sudo})
elif error_key == 'answer_done' and answer_sudo.token:
return request.render("survey.survey_closed_finished", self._prepare_survey_finished_values(survey_sudo, answer_sudo, token=answer_sudo.token))
elif error_key == 'answer_done' and answer_sudo.access_token:
return request.render("survey.survey_closed_finished", self._prepare_survey_finished_values(survey_sudo, answer_sudo, token=answer_sudo.access_token))

return werkzeug.utils.redirect("/")

Expand All @@ -153,7 +153,7 @@ def survey_test(self, survey_token, **kwargs):
answer_sudo = survey_sudo._create_answer(user=request.env.user, test_entry=True)
except:
return werkzeug.utils.redirect('/')
return request.redirect('/survey/start/%s?%s' % (survey_sudo.access_token, keep_query('*', answer_token=answer_sudo.token)))
return request.redirect('/survey/start/%s?%s' % (survey_sudo.access_token, keep_query('*', answer_token=answer_sudo.access_token)))

@http.route('/survey/retry/<string:survey_token>/<string:answer_token>', type='http', auth='public', website=True)
def survey_retry(self, survey_token, answer_token, **post):
Expand All @@ -178,7 +178,7 @@ def survey_retry(self, survey_token, answer_token, **post):
)
except:
return werkzeug.utils.redirect("/")
return request.redirect('/survey/start/%s?%s' % (survey_sudo.access_token, keep_query('*', answer_token=retry_answer_sudo.token)))
return request.redirect('/survey/start/%s?%s' % (survey_sudo.access_token, keep_query('*', answer_token=retry_answer_sudo.access_token)))

def _prepare_retry_additional_values(self, answer):
return {
Expand Down Expand Up @@ -220,7 +220,7 @@ def survey_start(self, survey_token, answer_token=None, email=False, **post):
data = {'survey': survey_sudo, 'answer': answer_sudo, 'page': 0}
return request.render('survey.survey_page_start', data)
else:
return request.redirect('/survey/fill/%s/%s' % (survey_sudo.access_token, answer_sudo.token))
return request.redirect('/survey/fill/%s/%s' % (survey_sudo.access_token, answer_sudo.access_token))

@http.route('/survey/fill/<string:survey_token>/<string:answer_token>', type='http', auth='public', website=True)
def survey_display_page(self, survey_token, answer_token, **post):
Expand Down Expand Up @@ -459,7 +459,7 @@ def survey_get_certification(self, survey_id, **kwargs):
succeeded_attempt = request.env['survey.user_input'].sudo().search([
('partner_id', '=', request.env.user.partner_id.id),
('survey_id', '=', survey_id),
('quizz_passed', '=', True)
('scoring_success', '=', True)
], limit=1)

if not succeeded_attempt:
Expand Down Expand Up @@ -542,31 +542,31 @@ def _get_scoring_data(self, survey):

count_data = request.env['survey.user_input'].read_group(
[('survey_id', '=', survey.id), ('state', '=', 'done'), ('test_entry', '=', False)],
['quizz_passed', 'id:count_distinct'],
['quizz_passed']
['scoring_success', 'id:count_distinct'],
['scoring_success']
)

quizz_passed_count = 0
quizz_failed_count = 0
scoring_success_count = 0
scoring_failed_count = 0
for count_data_item in count_data:
if count_data_item['quizz_passed']:
quizz_passed_count = count_data_item['quizz_passed_count']
if count_data_item['scoring_success']:
scoring_success_count = count_data_item['scoring_success_count']
else:
quizz_failed_count = count_data_item['quizz_passed_count']
scoring_failed_count = count_data_item['scoring_success_count']

graph_data = [{
'text': _('Passed'),
'count': quizz_passed_count,
'count': scoring_success_count,
'color': '#2E7D32'
}, {
'text': _('Missed'),
'count': quizz_failed_count,
'count': scoring_failed_count,
'color': '#C62828'
}]

total_quizz_passed = quizz_passed_count + quizz_failed_count
total_scoring_success = scoring_success_count + scoring_failed_count
return {
'success_rate': round((quizz_passed_count / total_quizz_passed) * 100, 1) if total_quizz_passed > 0 else 0,
'success_rate': round((scoring_success_count / total_scoring_success) * 100, 1) if total_scoring_success > 0 else 0,
'graph_data': graph_data
}

Expand Down
2 changes: 1 addition & 1 deletion addons/survey/models/res_partner.py
Expand Up @@ -13,7 +13,7 @@ class ResPartner(models.Model):
@api.depends('is_company')
def _compute_certifications_count(self):
read_group_res = self.env['survey.user_input'].sudo().read_group(
[('partner_id', 'in', self.ids), ('quizz_passed', '=', True)],
[('partner_id', 'in', self.ids), ('scoring_success', '=', True)],
['partner_id'], 'partner_id'
)
data = dict((res['partner_id'][0], res['partner_id_count']) for res in read_group_res)
Expand Down
16 changes: 8 additions & 8 deletions addons/survey/models/survey_survey.py
Expand Up @@ -122,7 +122,7 @@ def _compute_users_can_signup(self):
for survey in self:
survey.users_can_signup = signup_allowed

@api.depends('user_input_ids.state', 'user_input_ids.test_entry', 'user_input_ids.quizz_score', 'user_input_ids.quizz_passed')
@api.depends('user_input_ids.state', 'user_input_ids.test_entry', 'user_input_ids.scoring_percentage', 'user_input_ids.scoring_success')
def _compute_survey_statistic(self):
default_vals = {
'answer_count': 0, 'answer_done_count': 0, 'success_count': 0,
Expand All @@ -132,13 +132,13 @@ def _compute_survey_statistic(self):
UserInput = self.env['survey.user_input']
base_domain = ['&', ('survey_id', 'in', self.ids), ('test_entry', '!=', True)]

read_group_res = UserInput.read_group(base_domain, ['survey_id', 'state'], ['survey_id', 'state', 'quizz_score', 'quizz_passed'], lazy=False)
read_group_res = UserInput.read_group(base_domain, ['survey_id', 'state'], ['survey_id', 'state', 'scoring_percentage', 'scoring_success'], lazy=False)
for item in read_group_res:
stat[item['survey_id'][0]]['answer_count'] += item['__count']
stat[item['survey_id'][0]]['answer_score_avg_total'] += item['quizz_score']
stat[item['survey_id'][0]]['answer_score_avg_total'] += item['scoring_percentage']
if item['state'] == 'done':
stat[item['survey_id'][0]]['answer_done_count'] += item['__count']
if item['quizz_passed']:
if item['scoring_success']:
stat[item['survey_id'][0]]['success_count'] += item['__count']

for survey_id, values in stat.items():
Expand Down Expand Up @@ -470,7 +470,7 @@ def action_send_survey(self):
def action_start_survey(self, answer=None):
""" Open the website page with the survey form """
self.ensure_one()
url = '%s?%s' % (self.get_start_url(), werkzeug.urls.url_encode({'answer_token': answer.token or None}))
url = '%s?%s' % (self.get_start_url(), werkzeug.urls.url_encode({'answer_token': answer.access_token or None}))
return {
'type': 'ir.actions.act_url',
'name': "Start Survey",
Expand All @@ -481,7 +481,7 @@ def action_start_survey(self, answer=None):
def action_print_survey(self, answer=None):
""" Open the website page with the survey printable view """
self.ensure_one()
url = '%s?%s' % (self.get_print_url(), werkzeug.urls.url_encode({'answer_token': answer.token or None}))
url = '%s?%s' % (self.get_print_url(), werkzeug.urls.url_encode({'answer_token': answer.access_token or None}))
return {
'type': 'ir.actions.act_url',
'name': "Print Survey",
Expand Down Expand Up @@ -524,7 +524,7 @@ def action_survey_user_input_certified(self):
action = action_rec.read()[0]
ctx = dict(self.env.context)
ctx.update({'search_default_survey_id': self.ids[0],
'search_default_quizz_passed': 1,
'search_default_scoring_success': 1,
'search_default_not_test': 1})
action['context'] = ctx
return action
Expand Down Expand Up @@ -724,7 +724,7 @@ def _create_certification_badge_trigger(self):
goal = self.env['gamification.goal.definition'].create({
'name': self.title,
'description': "%s certification passed" % self.title,
'domain': "['&', ('survey_id', '=', %s), ('quizz_passed', '=', True)]" % self.id,
'domain': "['&', ('survey_id', '=', %s), ('scoring_success', '=', True)]" % self.id,
'computation_mode': 'count',
'display_mode': 'boolean',
'model_id': self.env.ref('survey.model_survey_user_input').id,
Expand Down
40 changes: 20 additions & 20 deletions addons/survey/models/survey_user.py
Expand Up @@ -32,41 +32,41 @@ class SurveyUserInput(models.Model):
# attempts management
is_attempts_limited = fields.Boolean("Limited number of attempts", related='survey_id.is_attempts_limited')
attempts_limit = fields.Integer("Number of attempts", related='survey_id.attempts_limit')
attempt_number = fields.Integer("Attempt n°", compute='_compute_attempt_number')
attempts_number = fields.Integer("Attempt n°", compute='_compute_attempts_number')
is_time_limit_reached = fields.Boolean("Is time limit reached?", compute='_compute_is_time_limit_reached')
# identification / access
token = fields.Char('Identification token', default=lambda self: str(uuid.uuid4()), readonly=True, required=True, copy=False)
access_token = fields.Char('Identification token', default=lambda self: str(uuid.uuid4()), readonly=True, required=True, copy=False)
invite_token = fields.Char('Invite token', readonly=True, copy=False) # no unique constraint, as it identifies a pool of attempts
partner_id = fields.Many2one('res.partner', string='Partner', readonly=True)
email = fields.Char('E-mail', readonly=True)
# questions / answers
user_input_line_ids = fields.One2many('survey.user_input.line', 'user_input_id', string='Answers', copy=True)
question_ids = fields.Many2many('survey.question', string='Predefined Questions', readonly=True)
quizz_score = fields.Float("Score (%)", compute="_compute_quizz_score", store=True, compute_sudo=True) # stored for perf reasons
quizz_passed = fields.Boolean('Quizz Passed', compute='_compute_quizz_passed', store=True, compute_sudo=True) # stored for perf reasons
scoring_percentage = fields.Float("Score (%)", compute="_compute_scoring_percentage", store=True, compute_sudo=True) # stored for perf reasons
scoring_success = fields.Boolean('Quizz Passed', compute='_compute_scoring_success', store=True, compute_sudo=True) # stored for perf reasons

_sql_constraints = [
('unique_token', 'UNIQUE (token)', 'A token must be unique!'),
('unique_token', 'UNIQUE (access_token)', 'An access token must be unique!'),
]

@api.depends('user_input_line_ids.answer_score', 'user_input_line_ids.question_id')
def _compute_quizz_score(self):
def _compute_scoring_percentage(self):
for user_input in self:
total_possible_score = sum([
answer_score if answer_score > 0 else 0
for answer_score in user_input.question_ids.mapped('suggested_answer_ids.answer_score')
])

if total_possible_score == 0:
user_input.quizz_score = 0
user_input.scoring_percentage = 0
else:
score = (sum(user_input.user_input_line_ids.mapped('answer_score')) / total_possible_score) * 100
user_input.quizz_score = round(score, 2) if score > 0 else 0
user_input.scoring_percentage = round(score, 2) if score > 0 else 0

@api.depends('quizz_score', 'survey_id.scoring_success_min')
def _compute_quizz_passed(self):
@api.depends('scoring_percentage', 'survey_id.scoring_success_min')
def _compute_scoring_success(self):
for user_input in self:
user_input.quizz_passed = user_input.quizz_score >= user_input.survey_id.scoring_success_min
user_input.scoring_success = user_input.scoring_percentage >= user_input.survey_id.scoring_success_min

@api.depends('start_datetime', 'survey_id.is_time_limited', 'survey_id.time_limit')
def _compute_is_time_limit_reached(self):
Expand All @@ -76,16 +76,16 @@ def _compute_is_time_limit_reached(self):
> user_input.start_datetime + relativedelta(minutes=user_input.survey_id.time_limit)

@api.depends('state', 'test_entry', 'survey_id.is_attempts_limited', 'partner_id', 'email', 'invite_token')
def _compute_attempt_number(self):
def _compute_attempts_number(self):
attempts_to_compute = self.filtered(
lambda user_input: user_input.state == 'done' and not user_input.test_entry and user_input.survey_id.is_attempts_limited
)

for user_input in (self - attempts_to_compute):
user_input.attempt_number = 1
user_input.attempts_number = 1

if attempts_to_compute:
self.env.cr.execute("""SELECT user_input.id, (COUNT(previous_user_input.id) + 1) AS attempt_number
self.env.cr.execute("""SELECT user_input.id, (COUNT(previous_user_input.id) + 1) AS attempts_number
FROM survey_user_input user_input
LEFT OUTER JOIN survey_user_input previous_user_input
ON user_input.survey_id = previous_user_input.survey_id
Expand All @@ -101,13 +101,13 @@ def _compute_attempt_number(self):
attempts_count_results = self.env.cr.dictfetchall()

for user_input in attempts_to_compute:
attempt_number = 1
attempts_number = 1
for attempts_count_result in attempts_count_results:
if attempts_count_result['id'] == user_input.id:
attempt_number = attempts_count_result['attempt_number']
attempts_number = attempts_count_result['attempts_number']
break

user_input.attempt_number = attempt_number
user_input.attempts_number = attempts_number

def action_resend(self):
partners = self.env['res.partner']
Expand All @@ -131,7 +131,7 @@ def action_print_answers(self):
'type': 'ir.actions.act_url',
'name': "View Answers",
'target': 'self',
'url': '/survey/print/%s?answer_token=%s' % (self.survey_id.access_token, self.token)
'url': '/survey/print/%s?answer_token=%s' % (self.survey_id.access_token, self.access_token)
}

@api.model
Expand All @@ -150,7 +150,7 @@ def _mark_done(self):
Challenge = self.env['gamification.challenge'].sudo()
badge_ids = []
for user_input in self:
if user_input.survey_id.certification and user_input.quizz_passed:
if user_input.survey_id.certification and user_input.scoring_success:
if user_input.survey_id.certification_mail_template_id and not user_input.test_entry:
user_input.survey_id.certification_mail_template_id.send_mail(user_input.id, notif_layout="mail.mail_notification_light")
if user_input.survey_id.certification_give_badge:
Expand All @@ -163,7 +163,7 @@ def _mark_done(self):

def get_start_url(self):
self.ensure_one()
return '%s?answer_token=%s' % (self.survey_id.get_start_url(), self.token)
return '%s?answer_token=%s' % (self.survey_id.get_start_url(), self.access_token)

def get_print_url(self):
self.ensure_one()
Expand Down

0 comments on commit 755ac5f

Please sign in to comment.