24 changes: 13 additions & 11 deletions cvat/apps/quality_control/views.py
@@ -446,10 +446,18 @@ def immediate_reports(self, request, *args, **kwargs):
 # handle label matching task
 else:
     penalty_factor = 0.1
-    def calculate_score(gt_samples, ds_samples):
-        # Group annotations by label
+    def calculate_score(gt_samples, ds_samples, start_time=0):
+        gt_samples_adjusted = []
+        for ann in gt_samples:
+            adjusted_ann = ann.copy()
+            adjusted_points = adjusted_ann["points"].copy()
+            adjusted_points[0] = round(adjusted_points[0] - start_time, 10)  # Round to 10 decimal places
+            adjusted_points[3] = round(adjusted_points[3] - start_time, 10)
+            adjusted_ann["points"] = adjusted_points
+            gt_samples_adjusted.append(adjusted_ann)
+
         gt_by_label = {}
-        for idx, ann in enumerate(gt_samples):
+        for idx, ann in enumerate(gt_samples_adjusted):
             label = ann.get("label_id", "")
             if label not in gt_by_label:
                 gt_by_label[label] = []
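For context on the new start_time handling above: the ground-truth segments are shifted so their timestamps become relative to the job's start before they are compared with the predictions. A minimal standalone sketch of that adjustment, using hypothetical sample data and a hypothetical adjust_gt_samples helper; only the shift-and-round behaviour on points[0] and points[3] mirrors the diff:

# Sketch of the start_time normalization added above (hypothetical helper and data).
def adjust_gt_samples(gt_samples, start_time=0):
    adjusted = []
    for ann in gt_samples:
        ann = {**ann, "points": list(ann["points"])}
        # points[0] is the segment start, points[3] the segment end, as in the diff
        ann["points"][0] = round(ann["points"][0] - start_time, 10)
        ann["points"][3] = round(ann["points"][3] - start_time, 10)
        adjusted.append(ann)
    return adjusted

gt = [{"label_id": "speech", "points": [12.5, 0, 0, 14.0]}]
print(adjust_gt_samples(gt, start_time=10.0))
# [{'label_id': 'speech', 'points': [2.5, 0, 0, 4.0]}]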
@@ -466,27 +474,22 @@ def calculate_score(gt_samples, ds_samples):
         total_gt_count = 0
         unused_predictions = []

-        # Process each label separately
         for label in set(list(gt_by_label.keys()) + list(ds_by_label.keys())):
             gt_list = gt_by_label.get(label, [])
             ds_list = ds_by_label.get(label, [])
             total_gt_count += len(gt_list)

-            # Track best coverage for each GT
             gt_coverage = [0.0] * len(gt_list)
             used_predictions = [False] * len(ds_list)

-            # Check each GT against all predictions
             for gt_idx, (orig_gt_idx, gt_ann) in enumerate(gt_list):
                 gt_start, gt_end = gt_ann["points"][0], gt_ann["points"][3]
                 gt_length = gt_end - gt_start

-                # Find all overlapping predictions
                 overlapping_predictions = []
                 for ds_idx, ds_ann in enumerate(ds_list):
                     ds_start, ds_end = ds_ann["points"][0], ds_ann["points"][3]

-                    # Calculate overlap
                     overlap_start = max(gt_start, ds_start)
                     overlap_end = min(gt_end, ds_end)
                     overlap = max(0, overlap_end - overlap_start)
@@ -495,7 +498,6 @@ def calculate_score(gt_samples, ds_samples):
                         overlapping_predictions.append((ds_idx, overlap))
                         used_predictions[ds_idx] = True

-                # Merge overlaps to calculate total coverage
                 overlapping_predictions.sort(key=lambda x: x[1], reverse=True)
                 covered_intervals = []

@@ -520,7 +522,7 @@

                 # Calculate total coverage
                 total_covered = sum(end - start for start, end in covered_intervals)
-                coverage_ratio = min(1.0, total_covered / gt_length)
+                coverage_ratio = min(1.0, total_covered / gt_length) if gt_length > 0 else 0
                 gt_coverage[gt_idx] = coverage_ratio

             # Add unused predictions for this label to global list
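The guard added above matters when a ground-truth segment has identical start and end points, which would otherwise divide by zero. A standalone sketch of the coverage computation around that guard; the merge_intervals helper and the sample numbers are hypothetical, and only the "if gt_length > 0 else 0" behaviour comes from the diff:

# Illustrative coverage-ratio sketch with the zero-length guard (hypothetical helpers).
def merge_intervals(intervals):
    merged = []
    for start, end in sorted(intervals):
        if merged and start <= merged[-1][1]:
            # overlapping or touching: extend the previous interval
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

def coverage_ratio(gt_start, gt_end, prediction_intervals):
    gt_length = gt_end - gt_start
    covered_intervals = merge_intervals(
        # clip each prediction to the GT segment and keep only real overlaps
        [(max(gt_start, s), min(gt_end, e))
         for s, e in prediction_intervals
         if min(gt_end, e) > max(gt_start, s)]
    )
    total_covered = sum(end - start for start, end in covered_intervals)
    return min(1.0, total_covered / gt_length) if gt_length > 0 else 0

print(coverage_ratio(2.0, 6.0, [(1.0, 3.0), (2.5, 5.0)]))  # 0.75: [2.0, 5.0] covered out of 4.0
print(coverage_ratio(3.0, 3.0, [(1.0, 4.0)]))              # 0, thanks to the guard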
@@ -547,7 +549,7 @@ def calculate_score(gt_samples, ds_samples):

         return final_score

-    score = calculate_score(gt_samples_filtered, ds_samples_filtered)
+    score = calculate_score(gt_samples_filtered, ds_samples_filtered, start_time)

     response_data = {
         "score": score,