
Commit 82c499d

Merge pull request #998 from scap3yvt/997_panoptica_integration
Added instance wise segmentation metrics
2 parents 001c927 + d46bf05 commit 82c499d

File tree: 10 files changed, +181 -12 lines changed

.devcontainer/Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-FROM mcr.microsoft.com/devcontainers/python:3.9-bullseye
+FROM mcr.microsoft.com/devcontainers/python:3.11-bullseye
 
 # Copy environment.yml (if found) to a temp location so we update the environment. Also
 # copy "noop.txt" so the COPY instruction does not fail if no environment.yml exists.

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -4,6 +4,7 @@ __pycache__
 *.egg-info*
 */__pycache__/*
 .vscode
+.vscode/*
 *.py.*
 *.pkl
 *.swp

.spelling/.spelling/expect.txt

Lines changed: 4 additions & 0 deletions
@@ -732,3 +732,7 @@ kwonly
 torchscript
 hann
 numcodecs
+ASSD
+listmetric
+panoptica
+RVAE

GANDLF/cli/generate_metrics.py

Lines changed: 21 additions & 0 deletions
@@ -20,6 +20,7 @@
     mean_squared_log_error,
     mean_absolute_error,
     ncc_metrics,
+    generate_instance_segmentation,
 )
 from GANDLF.losses.segmentation import dice
 from GANDLF.metrics.segmentation import (
@@ -259,6 +260,26 @@ def generate_metrics_dict(
                     "volumeSimilarity_" + str(class_index)
                 ] = label_overlap_filter.GetVolumeSimilarity()
 
+    elif problem_type == "segmentation_brats":
+        for _, row in tqdm(input_df.iterrows(), total=input_df.shape[0]):
+            current_subject_id = row["SubjectID"]
+            overall_stats_dict[current_subject_id] = {}
+            label_image = torchio.LabelMap(row["Target"])
+            pred_image = torchio.LabelMap(row["Prediction"])
+            label_tensor = label_image.data
+            pred_tensor = pred_image.data
+            spacing = label_image.spacing
+            if label_tensor.data.shape[-1] == 1:
+                spacing = spacing[0:2]
+            # add dimension for batch
+            parameters["subject_spacing"] = torch.Tensor(spacing).unsqueeze(0)
+            label_array = label_tensor.unsqueeze(0).numpy()
+            pred_array = pred_tensor.unsqueeze(0).numpy()
+
+            overall_stats_dict[current_subject_id] = generate_instance_segmentation(
+                prediction=pred_array, target=label_array
+            )
+
     elif problem_type == "synthesis":
 
         def __fix_2d_tensor(input_tensor):
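For orientation, the new `segmentation_brats` branch boils down to the following hedged, standalone sketch: the NIfTI paths are hypothetical and the 2D spacing handling is omitted, but the conversion (TorchIO `LabelMap` → batched numpy array → `generate_instance_segmentation`) mirrors the diff above.

```python
# Hedged sketch of the per-subject flow added above; file paths are hypothetical.
import torchio

from GANDLF.metrics import generate_instance_segmentation

label_image = torchio.LabelMap("/path/to/001/target_image.nii.gz")     # hypothetical path
pred_image = torchio.LabelMap("/path/to/001/prediction_image.nii.gz")  # hypothetical path

# add a batch dimension, as the diff does, before handing the arrays to panoptica
label_array = label_image.data.unsqueeze(0).numpy()
pred_array = pred_image.data.unsqueeze(0).numpy()

results = generate_instance_segmentation(prediction=pred_array, target=label_array)
print(results)  # one entry per class group defined in the panoptica config
```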

GANDLF/metrics/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -40,6 +40,7 @@
 )
 import GANDLF.metrics.classification as classification
 import GANDLF.metrics.regression as regression
+from .segmentation_panoptica import generate_instance_segmentation
 
 
 # global defines for the metrics

GANDLF/metrics/panoptica_config_brats.yaml

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
+!Panoptica_Evaluator
+decision_metric: null
+decision_threshold: null
+edge_case_handler: !EdgeCaseHandler
+  empty_list_std: !EdgeCaseResult NAN
+  listmetric_zeroTP_handling:
+    !Metric DSC: !MetricZeroTPEdgeCaseHandling {empty_prediction_result: !EdgeCaseResult ZERO,
+      empty_reference_result: !EdgeCaseResult ZERO, no_instances_result: !EdgeCaseResult NAN,
+      normal: !EdgeCaseResult ZERO}
+    !Metric clDSC: !MetricZeroTPEdgeCaseHandling {empty_prediction_result: !EdgeCaseResult ZERO,
+      empty_reference_result: !EdgeCaseResult ZERO, no_instances_result: !EdgeCaseResult NAN,
+      normal: !EdgeCaseResult ZERO}
+    !Metric IOU: !MetricZeroTPEdgeCaseHandling {empty_prediction_result: !EdgeCaseResult ZERO,
+      empty_reference_result: !EdgeCaseResult ZERO, no_instances_result: !EdgeCaseResult NAN,
+      normal: !EdgeCaseResult ZERO}
+    !Metric ASSD: !MetricZeroTPEdgeCaseHandling {empty_prediction_result: !EdgeCaseResult INF,
+      empty_reference_result: !EdgeCaseResult INF, no_instances_result: !EdgeCaseResult NAN,
+      normal: !EdgeCaseResult INF}
+    !Metric RVD: !MetricZeroTPEdgeCaseHandling {empty_prediction_result: !EdgeCaseResult NAN,
+      empty_reference_result: !EdgeCaseResult NAN, no_instances_result: !EdgeCaseResult NAN,
+      normal: !EdgeCaseResult NAN}
+    !Metric RVAE: !MetricZeroTPEdgeCaseHandling {empty_prediction_result: !EdgeCaseResult NAN,
+      empty_reference_result: !EdgeCaseResult NAN, no_instances_result: !EdgeCaseResult NAN,
+      normal: !EdgeCaseResult NAN}
+expected_input: !InputType SEMANTIC
+global_metrics: [!Metric DSC]
+instance_approximator: !ConnectedComponentsInstanceApproximator {cca_backend: null}
+instance_matcher: !NaiveThresholdMatching {allow_many_to_one: false, matching_metric: !Metric IOU,
+  matching_threshold: 0.5}
+instance_metrics: [!Metric DSC, !Metric IOU, !Metric ASSD, !Metric RVD]
+log_times: false
+save_group_times: false
+segmentation_class_groups: !SegmentationClassGroups
+  groups:
+    ed: !LabelGroup
+      single_instance: false
+      value_labels: [2]
+    et: !LabelGroup
+      single_instance: false
+      value_labels: [3]
+    net: !LabelGroup
+      single_instance: false
+      value_labels: [1]
+    tc: !LabelMergeGroup
+      single_instance: false
+      value_labels: [1, 3]
+    wt: !LabelMergeGroup
+      single_instance: false
+      value_labels: [1, 2, 3]
+verbose: false
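To make the BraTS grouping in the config above concrete, here is a small, hedged illustration in plain numpy (not part of panoptica): the group names and label values (`ed`=2, `et`=3, `net`=1, plus the merged `tc` and `wt` groups) come from the YAML; the toy label array is invented for illustration.

```python
# Hedged illustration of the label groups defined in the config above.
import numpy as np

labels = np.array([0, 1, 2, 3, 1, 2])  # toy BraTS-style label map: 1=NET, 2=ED, 3=ET

groups = {
    "ed": np.isin(labels, [2]),
    "et": np.isin(labels, [3]),
    "net": np.isin(labels, [1]),
    "tc": np.isin(labels, [1, 3]),     # tumor core = NET + ET (LabelMergeGroup)
    "wt": np.isin(labels, [1, 2, 3]),  # whole tumor = NET + ED + ET (LabelMergeGroup)
}
for name, mask in groups.items():
    print(name, mask.astype(int))
```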

GANDLF/metrics/segmentation_panoptica.py

Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
+from pathlib import Path
+
+import numpy as np
+
+from panoptica import Panoptica_Evaluator
+
+
+def generate_instance_segmentation(
+    prediction: np.ndarray, target: np.ndarray, panoptica_config_path: str = None
+) -> dict:
+    """
+    Evaluate a single exam using Panoptica.
+
+    Args:
+        prediction (np.ndarray): The input prediction containing objects.
+        label_path (str): The path to the reference label.
+        panoptica_config_path (str): The path to the Panoptica configuration file.
+
+    Returns:
+        dict: The evaluation results.
+    """
+
+    cwd = Path(__file__).parent.absolute()
+    panoptica_config_path = (
+        cwd / "panoptica_config_path.yaml"
+        if panoptica_config_path is None
+        else panoptica_config_path
+    )
+    evaluator = Panoptica_Evaluator.load_from_config(panoptica_config_path)
+
+    # call evaluate
+    group2result = evaluator.evaluate(prediction_arr=prediction, reference_arr=target)
+
+    results = {k: r.to_dict() for k, r in group2result.items()}
+    return results
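A hedged usage sketch for the function above, showing the optional `panoptica_config_path` override: the custom YAML path is hypothetical, the toy volumes are invented, and without the override the function falls back to the config shipped next to the module.

```python
# Hedged sketch: calling generate_instance_segmentation with a custom config.
import numpy as np

from GANDLF.metrics import generate_instance_segmentation

# toy BraTS-style volumes with a batch dimension, labels 1 (NET) and 2 (ED)
target = np.zeros((1, 32, 32, 32), dtype=np.uint8)
target[0, 10:16, 10:16, 10:16] = 1
target[0, 16:20, 16:20, 16:20] = 2
prediction = target.copy()

results = generate_instance_segmentation(
    prediction=prediction,
    target=target,
    panoptica_config_path="/path/to/custom_panoptica_config.yaml",  # hypothetical path
)
print(sorted(results.keys()))  # group names from the config, e.g. ed, et, net, tc, wt
```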

docs/usage.md

Lines changed: 22 additions & 7 deletions
@@ -280,14 +280,29 @@ SubjectID,Target,Prediction
 ...
 ```
 
-To generate image to image metrics for synthesis tasks (including for the BraTS synthesis tasks [[1](https://www.synapse.org/#!Synapse:syn51156910/wiki/622356), [2](https://www.synapse.org/#!Synapse:syn51156910/wiki/622357)]), ensure that the config has `problem_type: synthesis`, and the CSV can be in the same format as segmentation (note that the `Mask` column is optional):
+### Special cases
 
-```csv
-SubjectID,Target,Prediction,Mask
-001,/path/to/001/target_image.nii.gz,/path/to/001/prediction_image.nii.gz,/path/to/001/brain_mask.nii.gz
-002,/path/to/002/target_image.nii.gz,/path/to/002/prediction_image.nii.gz,/path/to/002/brain_mask.nii.gz
-...
-```
+1. BraTS Segmentation Metrics
+
+To generate annotation to annotation metrics for BraTS segmentation tasks [[ref](https://www.synapse.org/brats)], ensure that the config has `problem_type: segmentation_brats`, and the CSV can be in the same format as segmentation:
+
+```csv
+SubjectID,Target,Prediction
+001,/path/to/001/target_image.nii.gz,/path/to/001/prediction_image.nii.gz
+002,/path/to/002/target_image.nii.gz,/path/to/002/prediction_image.nii.gz
+...
+```
+
+2. BraTS Synthesis Metrics
+
+To generate image to image metrics for synthesis tasks (including for the BraTS synthesis tasks [[1](https://www.synapse.org/#!Synapse:syn51156910/wiki/622356), [2](https://www.synapse.org/#!Synapse:syn51156910/wiki/622357)]), ensure that the config has `problem_type: synthesis`, and the CSV can be in the same format as segmentation (note that the `Mask` column is optional):
+
+```csv
+SubjectID,Target,Prediction,Mask
+001,/path/to/001/target_image.nii.gz,/path/to/001/prediction_image.nii.gz,/path/to/001/brain_mask.nii.gz
+002,/path/to/002/target_image.nii.gz,/path/to/002/prediction_image.nii.gz,/path/to/002/brain_mask.nii.gz
+...
+```
 
 
 ## Parallelize the Training
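As a hedged companion to the documentation change above, metric generation can also be driven from Python via `generate_metrics_dict` (the same entry point exercised in the test further below); all file paths are hypothetical and the YAML config is assumed to set `problem_type: segmentation_brats`.

```python
# Hedged sketch; paths are hypothetical.
from GANDLF.cli.generate_metrics import generate_metrics_dict

generate_metrics_dict(
    "/path/to/input.csv",    # columns: SubjectID,Target,Prediction
    "/path/to/config.yaml",  # must set problem_type: segmentation_brats
    "/path/to/output.json",  # per-subject panoptica results are written here
)
```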

setup.py

Lines changed: 7 additions & 4 deletions
@@ -4,8 +4,6 @@
 
 
 import sys, re, os
-
-
 from setuptools import setup, find_packages
 
 
@@ -33,7 +31,8 @@
 ]
 
 # Any extra files should be located at `GANDLF` module folder (not in repo root)
-extra_files = ["logging_config.yaml"]
+extra_files_root = ["logging_config.yaml"]
+extra_files_metrics = ["panoptica_config_brats.yaml"]
 toplevel_package_excludes = ["testing*"]
 
 # specifying version for `black` separately because it is also used to [check for lint](https://github.com/mlcommons/GaNDLF/blob/master/.github/workflows/black.yml)
@@ -88,6 +87,7 @@
     "openslide-python==1.4.1",
     "lion-pytorch==0.2.2",
     "pydantic==2.10.6",
+    "panoptica>=1.3.3",
 ]
 
 if __name__ == "__main__":
@@ -140,7 +140,10 @@
         long_description=readme,
         long_description_content_type="text/markdown",
         include_package_data=True,
-        package_data={"GANDLF": extra_files},
+        package_data={
+            "GANDLF": extra_files_root,
+            "GANDLF.metrics": extra_files_metrics,
+        },
         keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch",
         zip_safe=False,
     )
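As a quick, hedged sanity check that the `package_data` change above ships the metrics config with the installed package (the file name is taken from `extra_files_metrics` in the diff):

```python
# Hedged sketch: confirm the packaged YAML sits next to the installed GANDLF.metrics module.
from pathlib import Path

import GANDLF.metrics

config_path = Path(GANDLF.metrics.__file__).parent / "panoptica_config_brats.yaml"
print(config_path, "exists:", config_path.exists())
```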

testing/test_full.py

Lines changed: 39 additions & 0 deletions
@@ -3143,6 +3143,45 @@ def test_generic_cli_function_metrics_cli_rad_nd():
 
     sanitize_outputDir()
 
+    # this is for the brats segmentation metrics test
+    problem_type = "segmentation_brats"
+    reference_image_file = os.path.join(
+        inputDir, "metrics", "brats", "reference.nii.gz"
+    )
+    prediction_image_file = os.path.join(
+        inputDir, "metrics", "brats", "prediction.nii.gz"
+    )
+    subject_id = "brats_subject_1"
+    # write to a temporary CSV file
+    df = pd.DataFrame(
+        {
+            "SubjectID": [subject_id],
+            "Prediction": [prediction_image_file],
+            "Target": [reference_image_file],
+        }
+    )
+    temp_infer_csv = os.path.join(outputDir, "temp_csv.csv")
+    df.to_csv(temp_infer_csv, index=False)
+
+    # read and initialize parameters for specific data dimension
+    parameters = ConfigManager(
+        testingDir + "/config_segmentation.yaml", version_check_flag=False
+    )
+    parameters["modality"] = "rad"
+    parameters["patch_size"] = patch_size["3D"]
+    parameters["model"]["dimension"] = 3
+    parameters["verbose"] = False
+    temp_config = write_temp_config_path(parameters)
+
+    output_file = os.path.join(outputDir, "output_single-csv.json")
+    generate_metrics_dict(temp_infer_csv, temp_config, output_file)
+
+    assert os.path.isfile(
+        output_file
+    ), "Metrics output file was not generated for single-csv input"
+
+    sanitize_outputDir()
+
 
     # def test_generic_deploy_metrics_docker():
     # print("50: Testing deployment of a metrics generator to Docker")
