test: get interview details for applicant dashboard
- update tests for the new rating field behaviour (fractional values, configurable number of stars)
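
Background on the scaling the updated assertions rely on: the Rating field now
stores a fractional value between 0 and 1, and the number of stars is
configurable (the dashboard test asserts 5). A minimal sketch of that
conversion follows; the helper name is an illustrative assumption, not the
actual implementation in job_applicant.py:

    def to_stars(fractional_rating, stars=5):
        # Rating fields persist a 0-1 value; multiply by the configured
        # star count to get the value shown on the dashboard
        return fractional_rating * stars

    # e.g. an expected_average_rating stored as 0.8 corresponds to 4 of 5 stars
    assert to_stars(0.8) == 4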
diff --git a/erpnext/hr/doctype/interview/test_interview.py b/erpnext/hr/doctype/interview/test_interview.py
index 1a2257a..fdb11af 100644
--- a/erpnext/hr/doctype/interview/test_interview.py
+++ b/erpnext/hr/doctype/interview/test_interview.py
@@ -12,6 +12,7 @@
from erpnext.hr.doctype.designation.test_designation import create_designation
from erpnext.hr.doctype.interview.interview import DuplicateInterviewRoundError
+from erpnext.hr.doctype.job_applicant.job_applicant import get_interview_details
from erpnext.hr.doctype.job_applicant.test_job_applicant import create_job_applicant
@@ -70,6 +71,20 @@
email_queue = frappe.db.sql("""select * from `tabEmail Queue`""", as_dict=True)
self.assertTrue("Subject: Interview Feedback Reminder" in email_queue[0].message)
+ def test_get_interview_details_for_applicant_dashboard(self):
+ job_applicant = create_job_applicant()
+ interview = create_interview_and_dependencies(job_applicant.name)
+
+ details = get_interview_details(job_applicant.name)
+ self.assertEqual(details.get('stars'), 5)
+ self.assertEqual(details.get('interviews').get(interview.name), {
+ 'name': interview.name,
+ 'interview_round': interview.interview_round,
+ 'expected_average_rating': interview.expected_average_rating * 5,
+ 'average_rating': interview.average_rating * 5,
+ 'status': 'Pending'
+ })
+
def tearDown(self):
frappe.db.rollback()
@@ -106,7 +121,8 @@
interview_round = frappe.new_doc("Interview Round")
interview_round.round_name = name
interview_round.interview_type = create_interview_type()
- interview_round.expected_average_rating = 4
+ # equivalent to an average rating of 4 out of 5 stars (stored as 0.8 on the 0-1 scale)
+ interview_round.expected_average_rating = 0.8
if designation:
interview_round.designation = designation
diff --git a/erpnext/hr/doctype/interview_feedback/test_interview_feedback.py b/erpnext/hr/doctype/interview_feedback/test_interview_feedback.py
index d2ec5b9..19c4642 100644
--- a/erpnext/hr/doctype/interview_feedback/test_interview_feedback.py
+++ b/erpnext/hr/doctype/interview_feedback/test_interview_feedback.py
@@ -24,7 +24,7 @@
create_skill_set(['Leadership'])
interview_feedback = create_interview_feedback(interview.name, interviewer, skill_ratings)
- interview_feedback.append("skill_assessment", {"skill": 'Leadership', 'rating': 4})
+ interview_feedback.append("skill_assessment", {"skill": 'Leadership', 'rating': 0.8})
frappe.set_user(interviewer)
self.assertRaises(frappe.ValidationError, interview_feedback.save)
@@ -50,7 +50,7 @@
avg_rating = flt(total_rating / len(feedback_1.skill_assessment) if len(feedback_1.skill_assessment) else 0)
- self.assertEqual(flt(avg_rating, 3), feedback_1.average_rating)
+ self.assertEqual(flt(avg_rating, 2), flt(feedback_1.average_rating, 2))
avg_on_interview_detail = frappe.db.get_value('Interview Detail', {
'parent': feedback_1.interview,
@@ -59,7 +59,7 @@
}, 'average_rating')
# 1. average should be reflected in Interview Detail.
- self.assertEqual(avg_on_interview_detail, feedback_1.average_rating)
+ self.assertEqual(flt(avg_on_interview_detail, 2), flt(feedback_1.average_rating, 2))
'''For Second Interviewer Feedback'''
interviewer = interview.interview_details[1].interviewer
@@ -97,5 +97,5 @@
skills = frappe.get_all("Expected Skill Set", filters={"parent": interview_round}, fields = ["skill"])
for d in skills:
- d["rating"] = random.randint(1, 5)
+ d["rating"] = random.random()
return skills