Skip to content
119 changes: 96 additions & 23 deletions backend/users/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -1304,7 +1304,8 @@ def get_user_analytics(self, request):
status=status.HTTP_400_BAD_REQUEST,
)

project_type_lower = project_type.lower()
project_type_lower = (project_type or " ").lower()
print("project_type_lower", project_type_lower)
is_textual_project = (
False if project_type in get_audio_project_types() else True
) # flag for distinguishing between textual and audio projects
Expand All @@ -1326,16 +1327,19 @@ def get_user_analytics(self, request):
annotation_reviewers=user_id,
project_type=project_type,
)
print("project_objs", project_objs)
elif supercheck_reports:
if project_type == "all":
project_objs = Project.objects.filter( # Not using the project_type filter if it is set to "all"
review_supercheckers=user_id,
)
print("project_objs", project_objs)
else:
project_objs = Project.objects.filter(
review_supercheckers=user_id,
project_type=project_type,
)
print("project_objs", project_objs)
else:
if project_type == "all":
project_objs = Project.objects.filter(
Expand All @@ -1352,6 +1356,12 @@ def get_user_analytics(self, request):
total_annotated_tasks_count = 0
all_tasks_word_count = 0
all_projects_total_duration = 0

# New total counters
total_draft_tasks = 0
total_skipped_tasks = 0
total_to_be_revised_tasks = 0
total_rejected_tasks = 0
project_wise_summary = []
for proj in project_objs:
project_name = proj.title
Expand All @@ -1360,75 +1370,126 @@ def get_user_analytics(self, request):
False if project_type in get_audio_project_types() else True
)
annotated_labeled_tasks = []
# for reviewer reports
if review_reports:
labeld_tasks_objs = Task.objects.filter(
Q(project_id=proj.id)
& Q(review_user=user_id)
& Q(
task_status__in=[
"reviewed",
"exported",
"super_checked",
]
)
& Q(task_status__in=["reviewed","exported","super_checked",])
)
print("labeld_tasks_objs", labeld_tasks_objs)

annotated_task_ids = list(
labeld_tasks_objs.values_list("id", flat=True)
)
print("annotated_task_ids", annotated_task_ids)
annotated_labeled_tasks = Annotation.objects.filter(
task_id__in=annotated_task_ids,
annotation_type=REVIEWER_ANNOTATION,
updated_at__range=[start_date, end_date],
completed_by=user_id,
).exclude(annotation_status__in=["to_be_revised", "draft", "skipped"])
print("annotated_labeled_tasks", annotated_labeled_tasks)
# for super_checker reports
elif supercheck_reports:
labeld_tasks_objs = Task.objects.filter(
Q(project_id=proj.id)
& Q(super_check_user=user_id)
& Q(
task_status__in=[
"exported",
"super_checked",
]
)
& Q(task_status__in=["exported","super_checked",])
)
print("labeld_tasks_objs", labeld_tasks_objs)

annotated_task_ids = list(
labeld_tasks_objs.values_list("id", flat=True)
)
print("annotated_task_ids", annotated_task_ids)
annotated_labeled_tasks = Annotation.objects.filter(
task_id__in=annotated_task_ids,
annotation_type=SUPER_CHECKER_ANNOTATION,
updated_at__range=[start_date, end_date],
completed_by=user_id,
)
print("annotated_labeled_tasks", annotated_labeled_tasks)
else:
# for annotator reports
labeld_tasks_objs = Task.objects.filter(
Q(project_id=proj.id)
& Q(annotation_users=user_id)
& Q(
task_status__in=[
"annotated",
"reviewed",
"exported",
"super_checked",
]
)
& Q(task_status__in=["annotated","reviewed","exported","super_checked",])
)
print("labeld_tasks_objs", labeld_tasks_objs)
annotated_task_ids = list(
labeld_tasks_objs.values_list("id", flat=True)
)
print("annotated_task_ids", annotated_task_ids)
annotated_labeled_tasks = Annotation.objects.filter(
task_id__in=annotated_task_ids,
annotation_type=ANNOTATOR_ANNOTATION,
updated_at__range=[start_date, end_date],
completed_by=user_id,
)
print("annotated_labeled_tasks", annotated_labeled_tasks)

annotated_tasks_count = annotated_labeled_tasks.count()
total_annotated_tasks_count += annotated_tasks_count

# Draft, Skipped, To Be Revised, Rejected counts
# -----------------------------
annotation_type = (
REVIEWER_ANNOTATION if review_reports else
SUPER_CHECKER_ANNOTATION if supercheck_reports else
ANNOTATOR_ANNOTATION
)


draft_tasks_count = Annotation.objects.filter(
task__project_id=proj.id,
annotation_type=annotation_type,
updated_at__range=[start_date, end_date],
completed_by=user_id,
annotation_status="draft",
).count()
print("draft_tasks_count", draft_tasks_count)

skipped_tasks_count = Annotation.objects.filter(
task__project_id=proj.id,
annotation_type=annotation_type,
updated_at__range=[start_date, end_date],
completed_by=user_id,
annotation_status="skipped",
).count()
print("skipped_tasks_count", skipped_tasks_count)

to_be_revised_tasks_count = 0
if review_reports:
to_be_revised_tasks_count = Annotation.objects.filter(
task__project_id=proj.id,
task__review_user=user_id,
annotation_status="to_be_revised",
annotation_type=REVIEWER_ANNOTATION,
updated_at__range=[start_date, end_date],
).count()
print("to_be_revised_tasks_count = ", to_be_revised_tasks_count)

rejected_tasks_count = 0
if supercheck_reports:
rejected_tasks_count = Annotation.objects.filter(
task__project_id=proj.id,
task__super_check_user=user_id,
annotation_type=SUPER_CHECKER_ANNOTATION,
updated_at__range=[start_date, end_date],
completed_by=user_id,
annotation_status="rejected",
).count()
print("rejected_tasks_count = ", rejected_tasks_count)

# Update totals
total_draft_tasks += draft_tasks_count
total_skipped_tasks += skipped_tasks_count
total_to_be_revised_tasks += to_be_revised_tasks_count
total_rejected_tasks += rejected_tasks_count

# Lead Time
avg_lead_time = 0
lead_time_annotated_tasks = [
eachtask.lead_time for eachtask in annotated_labeled_tasks
Expand All @@ -1440,6 +1501,7 @@ def get_user_analytics(self, request):
)
avg_lead_time = round(avg_lead_time, 2)

# Word Count / Audio Duration
total_word_count = 0
if "OCRTranscription" in project_type:
for each_anno in annotated_labeled_tasks:
Expand All @@ -1451,7 +1513,6 @@ def get_user_analytics(self, request):
total_word_count_list.append(each_task.task.data["word_count"])
except:
pass

total_word_count = sum(total_word_count_list)
all_tasks_word_count += total_word_count

Expand All @@ -1468,6 +1529,7 @@ def get_user_analytics(self, request):
total_duration = convert_seconds_to_hours(sum(total_duration_list))
all_projects_total_duration += sum(total_duration_list)

# Each-project result
result = {
"Project Name": project_name,
(
Expand All @@ -1479,8 +1541,13 @@ def get_user_analytics(self, request):
else "Annotated Tasks"
)
): annotated_tasks_count,
"Draft Tasks": draft_tasks_count,
"Skipped Tasks": skipped_tasks_count,
"To Be Revised Tasks": to_be_revised_tasks_count if review_reports else 0,
"Rejected Tasks": rejected_tasks_count if supercheck_reports else 0,
"Word Count": total_word_count,
"Total Segments Duration": total_duration,

(
"Avg Review Time (sec)"
if review_reports
Expand All @@ -1491,6 +1558,7 @@ def get_user_analytics(self, request):
)
): avg_lead_time,
}
print("result", result)

if project_type in get_audio_project_types():
del result["Word Count"]
Expand All @@ -1516,6 +1584,7 @@ def get_user_analytics(self, request):
):
project_wise_summary.append(result)

# Sort project summary
project_wise_summary = sorted(
project_wise_summary,
key=lambda x: x[
Expand All @@ -1541,6 +1610,7 @@ def get_user_analytics(self, request):
# total_summary = {}
# if is_translation_project or project_type == "SemanticTextualSimilarity_Scale5":

# Total summary
total_result = {
(
"Reviewed Tasks"
Expand All @@ -1561,6 +1631,7 @@ def get_user_analytics(self, request):
)
): round(all_annotated_lead_time_count, 2),
}
print("total_result", total_result)
if project_type_lower != "all" and project_type in get_audio_project_types():
del total_result["Word Count"]
elif project_type_lower != "all" and is_textual_project:
Expand All @@ -1570,11 +1641,13 @@ def get_user_analytics(self, request):
del total_result["Total Segments Duration"]

total_summary = [total_result]
print("total_summary", total_summary)

final_result = {
"total_summary": total_summary,
"project_summary": project_wise_summary,
}
print("final_result", final_result)
return Response(final_result)

@action(
Expand Down
Loading