set logger output to INFO #17

name: Content Writer Evaluation

on:
  push:
    branches:
      - release
  workflow_dispatch:

permissions:
  id-token: write
  contents: read
  models: read

jobs:
  run-action:
    name: Run Azure AI Foundry Agent Evaluations
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to Azure
        uses: azure/login@v2
        with:
          client-id: ${{ secrets.AZURE_CLIENT_ID }}
          tenant-id: ${{ secrets.AZURE_TENANT_ID }}
          subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
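        # azure/login@v2 with client-id/tenant-id/subscription-id authenticates
        # via OIDC federation rather than a stored credential, which is why the
        # workflow grants `id-token: write` in the permissions block above.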

      - name: Store eval data
        id: store-eval-data
        # GITHUB_OUTPUT requires delimiter syntax for multi-line values; echoing
        # the JSON as name=value would break on the first newline, so the file
        # is streamed between EOF markers instead.
        run: |
          {
            echo "DATA_FILE<<EOF"
            cat data/content-writer.json
            echo "EOF"
          } >> "$GITHUB_OUTPUT"
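        # A minimal sketch of what data/content-writer.json might look like
        # (hypothetical shape, shown only for orientation; the actual file in
        # the repo is the source of truth):
        #   {
        #     "evaluators": ["relevance", "coherence", "fluency"],
        #     "samples": [{ "query": "...", "response": "..." }]
        #   }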

      - name: GitHub Models Eval Summary
        id: inference
        uses: actions/ai-inference@v1
        with:
          max-tokens: 300
          system-prompt: |
            You are an AI assistant that is knowledgeable about the evaluation of AI agents in Azure AI Foundry.
            Your task is to analyze the evaluation criteria and summarize the evaluation process.
            Your summary should target someone who is not familiar with the topic.
          prompt: |
            Summarize the evaluation criteria used in Azure AI Foundry to assess AI agent performance, using the eval data from ${{ steps.store-eval-data.outputs.DATA_FILE }}.
            Keep it under 150 words.
            Use bullet points for clarity.
            Focus on what is evaluated and why it matters for AI development and deployment.
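        # The step above exposes the model completion as
        # `steps.inference.outputs.response`, which the next step writes into
        # the job summary.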

      - name: Add Model Eval Summary to Job Summary
        id: output
        run: |
          echo "## Azure AI Foundry: Agent Evaluation Overview" >> "$GITHUB_STEP_SUMMARY"
          echo "${{ steps.inference.outputs.response }}" >> "$GITHUB_STEP_SUMMARY"
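        # Interpolating ${{ }} expressions directly into a shell script can
        # break if the model output contains quotes or backticks. A safer
        # variant (a sketch, not required by the action) passes it through env:
        #   env:
        #     RESPONSE: ${{ steps.inference.outputs.response }}
        #   run: printf '%s\n' "$RESPONSE" >> "$GITHUB_STEP_SUMMARY"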

      - name: Azure AI Agent Evaluation
        uses: microsoft/ai-agent-evals@v1-beta
        with:
          deployment-name: "gpt-4o-mini"
          azure-aiproject-connection-string: ${{ secrets.AZURE_AI_PROJECT_CONNECTION_STRING }}
          agent-ids: ${{ secrets.CONTENT_AGENT_IDS }}
          # evaluation-result-view: "all-scores"
          data-path: "data/agent-evals.json"
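          # Uncommenting evaluation-result-view above appears to switch the
          # report from the action's default view to all individual scores;
          # check the microsoft/ai-agent-evals docs for the values the pinned
          # version actually accepts.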