
Commit afbab76

Quickstart example comments
1 parent aa2420e

File tree

1 file changed: +6 -3


tests/pytest/test_markdown_highlighting.py

Lines changed: 6 additions & 3 deletions
@@ -25,9 +25,9 @@ def markdown_dataset_to_evaluation_row(data: List[Dict[str, Any]]) -> List[Evalu
     ]
 
 
-def markdown_highlighting_evaluate(messages: List[Message], ground_truth=None, **kwargs) -> EvaluateResult:
+def markdown_format_evaluate(messages: List[Message], ground_truth=None, **kwargs) -> EvaluateResult:
     """
-    Evaluation function that checks if the model's response contains the required number of highlighted sections.
+    Evaluation function that checks if the model's response contains the required number of formatted sections.
     """
 
     assistant_response = messages[-1].content
@@ -39,6 +39,9 @@ def markdown_highlighting_evaluate(messages: List[Message], ground_truth=None, *
     )
 
     required_highlights = int(ground_truth)
+
+    # Check if the response contains the required number of formatted sections
+    # e.g. **bold** or *italic*
 
     actual_count = 0
     highlights = re.findall(r"\*[^\n\*]*\*", assistant_response)
@@ -78,4 +81,4 @@ def test_markdown_highlighting_evaluation(input_dataset, input_params, model):
     """
     Test markdown highlighting validation using batch mode with evaluate().
     """
-    return evaluate(input_dataset, markdown_highlighting_evaluate)
+    return evaluate(input_dataset, markdown_format_evaluate)
