From a8dfd52d2395ede089008a2cd54a62647c5e7cc9 Mon Sep 17 00:00:00 2001 From: Hadar Date: Wed, 4 Jun 2025 11:13:34 +0300 Subject: [PATCH 1/4] add solutions tab --- docs/solutions/_category_.json | 6 ++++++ docs/solutions/another.md | 0 docs/solutions/sidebar.ts | 9 +++++++++ docs/solutions/solutions.md | 6 ++++++ docusaurus.config.js | 9 ++++++++- sidebars.js | 4 ++++ 6 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 docs/solutions/_category_.json create mode 100644 docs/solutions/another.md create mode 100644 docs/solutions/sidebar.ts create mode 100644 docs/solutions/solutions.md diff --git a/docs/solutions/_category_.json b/docs/solutions/_category_.json new file mode 100644 index 0000000000..6d7f7900ec --- /dev/null +++ b/docs/solutions/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 0, + "className": "hidden", + "collapsible": false, + "collapsed": false +} diff --git a/docs/solutions/another.md b/docs/solutions/another.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/solutions/sidebar.ts b/docs/solutions/sidebar.ts new file mode 100644 index 0000000000..ed3a8af34c --- /dev/null +++ b/docs/solutions/sidebar.ts @@ -0,0 +1,9 @@ +import type { SidebarsConfig } from "@docusaurus/plugin-content-docs"; + +const sidebar: SidebarsConfig = { + solutionssidebar: [ + {type: 'autogenerated', dirName: 'solutions'}, + ], +}; + +export default sidebar.solutionssidebar; diff --git a/docs/solutions/solutions.md b/docs/solutions/solutions.md new file mode 100644 index 0000000000..30aafa804f --- /dev/null +++ b/docs/solutions/solutions.md @@ -0,0 +1,6 @@ +--- +sidebarClassName: hidden +sidebar_label: All solutions +--- + +# Solutions \ No newline at end of file diff --git a/docusaurus.config.js b/docusaurus.config.js index 1df1c0f22b..6749bef359 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -100,7 +100,7 @@ const config = { label: "Home", position: "left", className: "header-home-link", - activeBaseRegex: "^((?!api-reference|guides).)*$", + activeBaseRegex: "^((?!api-reference|guides|solutions).)*$", }, { to: "/api-reference/port-api", @@ -116,6 +116,13 @@ const config = { className: "header-guides-link", activeBasePath: "guides", }, + { + to: "/solutions", + label: "Solutions", + position: "left", + className: "header-solutions-link", + activeBasePath: "solutions", + }, { to: "https://github.com/port-labs/port-docs", position: "right", diff --git a/sidebars.js b/sidebars.js index f24921bca2..ad03308e2d 100644 --- a/sidebars.js +++ b/sidebars.js @@ -14,6 +14,7 @@ import { ClassNames } from '@emotion/react'; import apiSidebar from './docs/api-reference/sidebar.ts'; import guidessidebar from './docs/guides/sidebar.ts'; +import solutionssidebar from './docs/solutions/sidebar.ts'; /** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ const sidebars = { @@ -24,6 +25,9 @@ const sidebars = { ], "guides-sidebar": [ guidessidebar, + ], + "solutions-sidebar": [ + solutionssidebar, ] // But you can create a sidebar manually /* From 2be47d1111d8fe23d65dbf495783de207d3b9408 Mon Sep 17 00:00:00 2001 From: barittahport Date: Tue, 10 Jun 2025 11:16:32 +0300 Subject: [PATCH 2/4] Update solutions documentation: add engineering360 and update config --- docs/solutions/another.md | 0 .../engineering360/Overview/Overview.md | 78 ++++++++++ .../engineering360/Overview/_category_.json | 4 + .../engineering360/Quickstart/Quickstart.md | 91 +++++++++++ .../engineering360/Quickstart/_category_.json | 4 + docs/solutions/engineering360/_category_.json | 4 + 
.../advanced-configuration/_category_.json | 4 + .../advanced-configuration.md | 19 +++ .../customizing-dora-metrics/_category_.json | 4 + .../custom-api/_category_.json | 4 + .../custom-api/custom-api.md | 147 ++++++++++++++++++ .../customizing-dora-metrics.md | 3 + .../github/_category_.json | 4 + .../customizing-dora-metrics/github/github.md | 42 +++++ .../gitlab/_category_.json | 4 + .../customizing-dora-metrics/gitlab/gitlab.md | 102 ++++++++++++ .../jira/_category_.json | 4 + .../customizing-dora-metrics/jira/jira.md | 111 +++++++++++++ .../pagerduty/_category_.json | 4 + .../pagerduty/pagerduty.md | 36 +++++ .../customizing-surveys/_category_.json | 4 + .../customizing-surveys.md | 3 + .../_category_.json | 4 + .../how-to-customize-your-survey.md | 1 + .../what-gets-installed/_category_.json | 4 + .../what-gets-installed.md | 66 ++++++++ .../_category_.json | 4 + .../_category_.json | 4 + .../common-pitfalls-in-metrics.md | 27 ++++ .../_category_.json | 4 + .../designing-actionable-surveys.md | 26 ++++ .../dora-metrics-explained/_category_.json | 4 + .../balancing-the-metrics/_category_.json | 4 + .../balancing-the-metrics.md | 19 +++ .../change-failure-rate/_category_.json | 4 + .../change-failure-rate.md | 17 ++ .../deployment-frequency/_category_.json | 4 + .../deployment-frequency.md | 15 ++ .../dora-metrics-explained.md | 4 + .../lead-time-for-change/_category_.json | 4 + .../lead-time-for-change.md | 16 ++ .../mttr/_category_.json | 4 + .../dora-metrics-explained/mttr/mttr.md | 17 ++ .../_category_.json | 4 + .../engineering-metrics-what-matters.md | 33 ++++ .../_category_.json | 4 + .../ports-philosophy-beyond-numbers.md | 16 ++ .../the-state-of-engineering-intelligence.md | 14 ++ .../engineering360/use-cases/_category_.json | 4 + .../use-cases/use-case-1/_category_.json | 4 + .../use-cases/use-case-1/use-case-1.md | 1 + .../use-cases/use-case-2/_category_.json | 4 + .../use-cases/use-case-2/use-case-2.md | 1 + .../use-cases/use-case-3/_category_.json | 4 + .../use-cases/use-case-3/use-case-3.md | 1 + .../engineering360/use-cases/use-cases.md | 1 + docusaurus.config.js | 6 +- 57 files changed, 1022 insertions(+), 3 deletions(-) delete mode 100644 docs/solutions/another.md create mode 100644 docs/solutions/engineering360/Overview/Overview.md create mode 100644 docs/solutions/engineering360/Overview/_category_.json create mode 100644 docs/solutions/engineering360/Quickstart/Quickstart.md create mode 100644 docs/solutions/engineering360/Quickstart/_category_.json create mode 100644 docs/solutions/engineering360/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/advanced-configuration.md create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/custom-api/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/custom-api/custom-api.md create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/customizing-dora-metrics.md create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/github/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/github/github.md create mode 100644 
docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/gitlab/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/gitlab/gitlab.md create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/jira/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/jira/jira.md create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/pagerduty/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/pagerduty/pagerduty.md create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-surveys/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-surveys/customizing-surveys.md create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-surveys/how-to-customize-your-survey/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/customizing-surveys/how-to-customize-your-survey/how-to-customize-your-survey.md create mode 100644 docs/solutions/engineering360/advanced-configuration/what-gets-installed/_category_.json create mode 100644 docs/solutions/engineering360/advanced-configuration/what-gets-installed/what-gets-installed.md create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/common-pitfalls-in-metrics.md create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/designing-actionable-surveys/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/designing-actionable-surveys/designing-actionable-surveys.md create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/balancing-the-metrics/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/balancing-the-metrics/balancing-the-metrics.md create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/change-failure-rate/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/change-failure-rate/change-failure-rate.md create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/deployment-frequency/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/deployment-frequency/deployment-frequency.md create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/dora-metrics-explained.md create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/lead-time-for-change/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/lead-time-for-change/lead-time-for-change.md 
create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/mttr/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/mttr/mttr.md create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/engineering-metrics-what-matters.md create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/ports-philosophy-beyond-numbers/_category_.json create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/ports-philosophy-beyond-numbers/ports-philosophy-beyond-numbers.md create mode 100644 docs/solutions/engineering360/the-state-of-engineering-intelligence/the-state-of-engineering-intelligence.md create mode 100644 docs/solutions/engineering360/use-cases/_category_.json create mode 100644 docs/solutions/engineering360/use-cases/use-case-1/_category_.json create mode 100644 docs/solutions/engineering360/use-cases/use-case-1/use-case-1.md create mode 100644 docs/solutions/engineering360/use-cases/use-case-2/_category_.json create mode 100644 docs/solutions/engineering360/use-cases/use-case-2/use-case-2.md create mode 100644 docs/solutions/engineering360/use-cases/use-case-3/_category_.json create mode 100644 docs/solutions/engineering360/use-cases/use-case-3/use-case-3.md create mode 100644 docs/solutions/engineering360/use-cases/use-cases.md diff --git a/docs/solutions/another.md b/docs/solutions/another.md deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docs/solutions/engineering360/Overview/Overview.md b/docs/solutions/engineering360/Overview/Overview.md new file mode 100644 index 0000000000..1d8b811a09 --- /dev/null +++ b/docs/solutions/engineering360/Overview/Overview.md @@ -0,0 +1,78 @@ + + +# Overview + +### Why Software Engineering Intelligence (SEI) Matters + +At Port, we recognize that engineering organizations thrive when decisions are grounded in clarity and precision. Software Engineering Intelligence (SEI) offers the visibility needed to transform intuition into insight, empowering teams to optimize software delivery and enhance the developer experience simultaneously. + +Engineering360 is Port’s SEI solution, designed to combine objective engineering performance data—such as DORA metrics—with qualitative developer sentiment gathered through targeted surveys. This approach addresses two pivotal questions every engineering leader should consider: + +- How effectively is our engineering organization delivering software? +- How satisfied and empowered are our developers in their daily work? + +### Why Combine DORA Metrics with Developer Sentiment? +DORA metrics—Deployment Frequency, Lead Time, Change Failure Rate, and Mean Time to Recovery—offer vital data about the health and efficiency of your software delivery processes. However, they only tell part of the story. To genuinely improve performance, you must understand the "why" behind these metrics. + +Developer sentiment surveys complement DORA metrics by shedding light on the experiences and challenges your teams face daily. They reveal human factors affecting performance, such as process friction, tooling bottlenecks, or team dynamics that pure data metrics alone cannot capture. 
+ +For example, if deployment frequency declines and surveys simultaneously indicate widespread frustration with unreliable CI/CD pipelines, you can directly correlate these insights to identify and resolve the underlying issues causing these disruptions. + +Engineering360 bridges these perspectives, enabling engineering leaders to: +- Identify not just what is happening, but why. +- Take proactive, targeted actions based on reliable data. +- Improve developer satisfaction, directly enhancing productivity, retention, and engagement. + +### From Insight to Action—All Within Your Internal Developer Portal + +While numerous tools exist for measuring engineering health, from dashboards to developer analytics platforms, Port aims to go further. With Engineering360, insights are transformed into actions within your internal developer portal. + +By integrating DORA metrics and developer sentiment directly into your portal, Engineering360 allows you to: + +#### 1. Discover your team’s blockers +Use a combination of engineering metrics and developer feedback to uncover friction points that are slowing your teams down. +#### 2. Focus on the biggest bottleneck +Prioritize the most impactful blockers by correlating quantitative data with real survey responses, ensuring your efforts target what matters most. +#### 3. Clear them inside Port +Resolve bottlenecks directly within your portal using Port’s automations, self-service actions, or custom workflows—making it easy to drive meaningful change as part of everyday work. +#### 4. Show your velocity gains +Track and communicate improvements in delivery metrics and developer satisfaction with out-of-the-box dashboards, proving ROI and motivating further progress. + +This approach ensures that improvements are not only identified but also implemented effectively, enhancing both visibility and velocity. + +For instance, consider a scenario where deployment frequency declines—a key DORA metric indicating potential issues in the delivery pipeline. Simultaneously, developer surveys reveal growing frustration with flaky CI/CD pipelines. Engineering360 correlates these insights, pinpointing the unreliable pipelines as the root cause. Within the internal developer portal, teams can access self-service actions to rerun failed pipelines or retrieve logs, while automated alerts notify the DevOps team when pipeline failures exceed a certain threshold, prompting immediate investigation. + +In another case, an increase in lead time for changes is observed, coupled with developer feedback highlighting delays in code reviews. Engineering360 identifies this bottleneck and facilitates a solution through the portal by introducing a dashboard displaying pending pull requests and their statuses. Automated reminders are sent to reviewers for PRs exceeding a set timeframe, ensuring timely code reviews and reducing lead times. + +By unifying insights and actions, Engineering360 empowers teams to move beyond reactive problem-solving towards proactive improvement, ensuring that developer experience and software delivery performance continually evolve in tandem. + +### Who Benefits from Engineering360? +Engineering360 supports stakeholders across the engineering organization: +- VPs of Engineering gain clear visibility into organizational health, enabling strategic decisions backed by actionable insights. + +- Platform Engineers leverage detailed performance and sentiment data to optimize tooling, infrastructure, and processes that directly impact developers’ daily workflows. 
+ + +- Developer Experience (DevEx) Leads use qualitative insights to understand developers’ pain points deeply and craft tailored interventions that boost morale, productivity, and team cohesion. + +### When to Introduce Engineering360 into Your Portal Journey +Introducing Engineering360 is recommended once your Internal Developer Portal (IDP) foundation is in place—after onboarding your services, integrations, and basic workflows. It's the natural next step in your portal's maturity, transitioning from cataloging and basic operations to actively measuring, analyzing, and continuously improving engineering practices. +Integrating Engineering360 early in your portal journey ensures a culture of transparency, accountability, and continuous improvement from the outset, helping your organization scale effectively and sustainably. + +### Key Outcomes and ROI + +With Engineering360, engineering leaders can expect to achieve: + +- Reduced friction: Quickly identify and remove process bottlenecks, enabling faster software delivery. + + +- Improved stability and reliability: Leverage data-driven retrospectives to lower change failure rates and accelerate incident recovery. + + +- Higher developer satisfaction and productivity: Address developers' actual pain points, enhancing job satisfaction and reducing attrition. + + +- Continuous improvement: Establish a repeatable, measurable cycle of improvement that aligns developer experience with organizational performance goals. + + +Engineering360 is your pathway to a developer-centric engineering culture. diff --git a/docs/solutions/engineering360/Overview/_category_.json b/docs/solutions/engineering360/Overview/_category_.json new file mode 100644 index 0000000000..4471655de5 --- /dev/null +++ b/docs/solutions/engineering360/Overview/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Overview", + "position": 1 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/Quickstart/Quickstart.md b/docs/solutions/engineering360/Quickstart/Quickstart.md new file mode 100644 index 0000000000..94b89b461d --- /dev/null +++ b/docs/solutions/engineering360/Quickstart/Quickstart.md @@ -0,0 +1,91 @@ +# Quickstart +Ready to get started with Engineering360? This quickstart guide will walk you through setting up DORA metrics and developer surveys in Port. We’ll cover the basic setup steps, prerequisites, and how to see your data flowing in. By the end, you’ll have all the foundational components in place and your Engineering360 dashboards up and running. + +## DORA Metrics +### Prerequisites +Before installing Engineering360 DORA metrics components, ensure the following prerequisites are in place: +- Admin permissions (in order to install the experience and execute self-service actions). +- A connected GitHub integration (for deployment tracking) or access to custom API setup. +- A connected PagerDuty integration (for incident tracking) or access to custom API setup. + +### Install the experience + +1. Go to your software catalog. + +2. Click on the + New button in the left sidebar, then choose New experience. + +3. In the modal, choose New DORA metrics dashboard. + +4. Choose a title for your dashboard and click Create. + +*put here image* + +### Configure your deployments & incidents +After installation, you need to: + +1. Configure Deployments: + +- Choose the relevant deployment method according to your organization's definition of a deployment (Merged PRs, GitHub Workflows, GitHub Releases, GitHub Deployments, Custom API, etc.). 
+- Apply filters (target branch, PR labels, etc.) to align with your process. + +2. Configure Incidents: + +- Choose the relevant incident method according to your organization's definition of an incident (PagerDuty, Custom API, etc.). +- Connect to a source like PagerDuty or configure via Custom API. + +### Track results +Navigate to the DORA metrics dashboard created in the "DORA setup & dashboard" folder in your software catalog. + +Once your data starts accumulating, you will see visualized metrics including: + +- Deployment frequency. +- Lead time for changes. +- Mean time to recovery (MTTR). +- Change failure rate. + +These metrics give you a high-level view of your engineering velocity and reliability, helping your team identify areas for improvement. + + +## Surveys +### Prerequisites +Before installing Engineering360 survey components, ensure the following prerequisites are in place: +- Admin permissions (in order to install the experience and execute self-service actions). +- A communication channel (e.g., Slack, email) to distribute survey links. + +### Install the experience + +1. Go to your software catalog. + +2. Click on the + New button in the left sidebar, then choose New experience. + +3. In the modal, choose New Survey. + +4. Choose a survey type and give it a unique identifier (this will allow you to run this survey multiple times and track its results over time). + +*put here image* + +### Configure & distribute the survey +After installation, you need to: + +1. Adjust Survey Visibility: + +- Go to the self-service page of your portal. + +- Find the survey, then click the ... button and select Edit. +- Go to the Permissions tab and configure who can respond to the survey (i.e., who can execute this action). + +2. Distribute the Survey: + +- Hover over the action card, then click on the chain icon to copy the link to the survey. + +*put here image* + +### View results +Navigate to the survey dashboard created in the "DORA setup & dashboard" folder in your software catalog. + +Once responses are submitted, you'll see developer sentiment and trends visualized in real time. + +*put here image* + +These insights help platform and engineering teams understand friction points, monitor developer experience, and prioritize where to invest future resources. 
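+### Optional: verify data via Port’s API
+
+If you prefer a programmatic check that data is flowing in, you can query Port’s API for the entities behind these dashboards. The snippet below is an illustrative sketch, not part of the installed experience: it reuses the credentials flow shown in the Custom API guide under Advanced Configuration, and it assumes a GET endpoint for listing a blueprint's entities (see Port's API reference) and the `dora_deployment` blueprint identifier used in that guide. Adjust both to match your portal.
+
+```python
+import requests
+
+# Consider using environment variables instead of the hardcoded values
+CLIENT_ID = "YOUR_CLIENT_ID"
+CLIENT_SECRET = "YOUR_CLIENT_SECRET"
+PORT_API_URL = "https://api.port.io"
+
+# Exchange client credentials for an access token
+auth_response = requests.post(
+    f"{PORT_API_URL}/v1/auth/access_token",
+    json={"clientId": CLIENT_ID, "clientSecret": CLIENT_SECRET}
+)
+auth_response.raise_for_status()
+access_token = auth_response.json().get("accessToken")
+
+# List deployment entities to confirm ingestion has started
+res = requests.get(
+    f"{PORT_API_URL}/v1/blueprints/dora_deployment/entities",
+    headers={"Authorization": f"Bearer {access_token}"}
+)
+res.raise_for_status()
+print(f"Deployments ingested so far: {len(res.json().get('entities', []))}")
+```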
+ + diff --git a/docs/solutions/engineering360/Quickstart/_category_.json b/docs/solutions/engineering360/Quickstart/_category_.json new file mode 100644 index 0000000000..f2a67264a9 --- /dev/null +++ b/docs/solutions/engineering360/Quickstart/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Quickstart", + "position": 2 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/_category_.json b/docs/solutions/engineering360/_category_.json new file mode 100644 index 0000000000..8ad1dc1ada --- /dev/null +++ b/docs/solutions/engineering360/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Engineering360", + "position": 1 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/_category_.json b/docs/solutions/engineering360/advanced-configuration/_category_.json new file mode 100644 index 0000000000..9fa572234c --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Advanced Configuration", + "position": 4 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/advanced-configuration.md b/docs/solutions/engineering360/advanced-configuration/advanced-configuration.md new file mode 100644 index 0000000000..57e0bef1d9 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/advanced-configuration.md @@ -0,0 +1,19 @@ +# Advanced configuration +Go beyond the basics and make Engineering360 fit your organization’s unique workflows. + +This section covers everything you need to know about advanced setup, integration options, and customizing how DORA metrics and developer surveys work in Port. Here you’ll find: + +- What Gets Installed: See all the components, blueprints, dashboards, and automations that Engineering360 provisions in your portal. + + +- Customizing DORA Metrics: Step-by-step guides for integrating with GitHub, GitLab, Jira, PagerDuty, or your own systems. + + +- Customizing Developer Surveys: How to tailor surveys, set permissions, and boost engagement. + + +- API and Custom Sources: Bring in your own data and trigger workflows unique to your teams. + + +Use the links in the sidebar to jump straight to your topic of interest. 
+ diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/_category_.json b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/_category_.json new file mode 100644 index 0000000000..717695d49c --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Customizing DORA Metrics", + "position": 2 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/custom-api/_category_.json b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/custom-api/_category_.json new file mode 100644 index 0000000000..948a0d164e --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/custom-api/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Custom API", + "position": 5 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/custom-api/custom-api.md b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/custom-api/custom-api.md new file mode 100644 index 0000000000..04dd204c16 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/custom-api/custom-api.md @@ -0,0 +1,147 @@ +# Custom API Integration for DORA Metrics + +If your team relies on custom tooling, internal scripts, or external systems, Port’s API integration enables you to track both deployments and incidents directly. This flexible approach ensures that your DORA metrics accurately reflect your unique workflows, regardless of your underlying technologies or systems. + +### Prerequisites +- An installed DORA Metrics experience. +- Admin permissions in Port. + +### Get started + +**1. Obtain your client ID and client secret** +- In Port, click the ... menu in the top right corner. +- Select Credentials. +- Copy your client ID and client secret for API authentication. + +**2. Create a deployment entity (Python)** +Use the client credentials from the previous step to retrieve an API access token. +Then send a POST request to the Port API to create a deployment entity. + +Below is an example using Python: +```python +import requests + +# Consider using environment variables instead of the hardcoded values +CLIENT_ID = "YOUR_CLIENT_ID" +CLIENT_SECRET = "YOUR_CLIENT_SECRET" +PORT_API_URL = "https://api.port.io" + +# Step 1: Get the access token + +auth_response = requests.post( + f"{PORT_API_URL}/v1/auth/access_token", + json={ + "clientId": CLIENT_ID, + "clientSecret": CLIENT_SECRET + } +) + +auth_response.raise_for_status() +access_token = auth_response.json().get("accessToken") + +# Step 2: Use the access token to create a deployment entity + +blueprint_id = "dora_deployment" + +entity = { + "identifier": "some_deployment_1", # Unique identifier for the deployment + "title": "Some Deployment", + "properties": { + "lead_time_hours": 0, + "deployment_time": "2025-01-01T00:00:00.000Z", + "deployment_type": "Custom API", + "environment": "production", + "status": "success" + }, + "relations": { + "group": "all_teams" + } +} + +headers = { + "Authorization": f"Bearer {access_token}" +} + +res = requests.post( + f"{PORT_API_URL}/v1/blueprints/{blueprint_id}/entities?upsert=true", + json=entity, + headers=headers +) + +res.raise_for_status() +``` + +**3. 
Create an incident entity (Python)** + +Similarly, create incidents using Port's API: + +```python +import requests + +# Consider using environment variables instead of the hardcoded values +CLIENT_ID = "YOUR_CLIENT_ID" +CLIENT_SECRET = "YOUR_CLIENT_SECRET" +PORT_API_URL = "https://api.port.io" + +# Step 1: Get the access token + +auth_response = requests.post( + f"{PORT_API_URL}/v1/auth/access_token", + json={ + "clientId": CLIENT_ID, + "clientSecret": CLIENT_SECRET + } +) + +auth_response.raise_for_status() +access_token = auth_response.json().get("accessToken") + +# Step 2: Use the access token to create an incident entity + +blueprint_id = "dora_incident" + +entity = { + "identifier": "some_incident_1", + "title": "Some Incident", + "properties": { + "incident_type": "Custom incident", + "description": "some description", + "status": "resolved", + "incident_url": "https://example.com", + "created_at": "2025-01-01T00:00:00.000Z", + "resolved_at": "2025-01-01T00:00:00.000Z", + "priority": "low", + "time_to_resolve": 0, + "urgency": "low" + }, + "relations": { + "group": "all_teams" + } +} + +headers = { + "Authorization": f"Bearer {access_token}" +} + +res = requests.post( + f"{PORT_API_URL}/v1/blueprints/{blueprint_id}/entities?upsert=true", + json=entity, + headers=headers +) + +res.raise_for_status() +``` + +### Advanced Use Cases + +**Automated Event Tracking**: Incorporate API calls into existing CI/CD pipelines or incident management scripts for automatic tracking. + +**Contextual Insights**: Add relations to teams or services to enhance analytics within your DORA dashboards. + +### Tips & Best practices + +Use environment variables or a secrets manager for credentials—avoid hardcoding sensitive data in scripts. +Refer to the Port API reference documentation for more advanced use cases, such as updating or deleting entities. + +With this custom API approach, you can bring any deployment or incident event into Port for unified, accurate DORA tracking—no matter where or how it happens in your software delivery pipeline. + diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/customizing-dora-metrics.md b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/customizing-dora-metrics.md new file mode 100644 index 0000000000..ddd877c1ac --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/customizing-dora-metrics.md @@ -0,0 +1,3 @@ +# Customizing DORA metrics + +Engineering360 gives you flexibility in how DORA metrics are tracked and managed in your portal. On this page, you’ll find guidance for tailoring DORA metrics data collection to your stack—whether you want to use GitHub, GitLab, Jira, PagerDuty, or custom sources for deployments and incidents. Explore the subpages below for step-by-step integration and configuration guides, and learn how to map deployments and incidents to your own services and teams for actionable insights that truly reflect your organization's workflows. 
\ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/github/_category_.json b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/github/_category_.json new file mode 100644 index 0000000000..8904f8cbd7 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/github/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "GitHub integration", + "position": 1 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/github/github.md b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/github/github.md new file mode 100644 index 0000000000..e38be0d987 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/github/github.md @@ -0,0 +1,42 @@ +# GitHub integration for DORA metrics + +Port provides a flexible, self-service way to track DORA metrics from your GitHub repositories—no manual configuration needed. You can choose from several out-of-the-box actions that let you define what counts as a “deployment” for your organization, all directly in the portal UI. + +### Prerequisites +- An installed GitHub integration. +- An installed DORA Metrics experience. +- Admin permissions in Port to modify integrations and mappings. + + +### Choosing Your Deployment Method +The definition of a “deployment” can vary between engineering organizations. With Port’s GitHub integration, you can select the mapping that best matches your workflow using self-service actions. These actions allow you to start ingesting deployment data with just a few clicks. + +**Available deployment triggers include:** +- **Merged Pull Requests:** Treat every merged PR as a deployment event. +- **GitHub Workflow Runs:** Use successful GitHub Actions workflow runs as deployments (ideal for CI/CD pipelines). +- **GitHub Releases:** Track deployments based on tagged version releases. +- **GitHub Deployments:** Track deployments triggered via GitHub’s Deployment API. + +You can create multiple deployment methods by using the same self-service actions repeatedly. + + +### How to Configure +1. **Go to the “1. Set up your deployments” configuration page in your Port portal**. + +2. **Locate the “Configure your deployment method” section**. + +3. **Choose the self-service action** that matches your deployment definition (e.g., “Create a deployment for every GitHub pull request”). + +4. **Apply the desired filters**—for example, you might want to track only deployments in a certain environment, branch, or with specific labels. Adjust the filters in the action modal to match your team’s workflow, then save your changes. + +Once configured, Port will automatically ingest deployment events from GitHub and update your DORA dashboards—without any additional setup. + +### Customization and Advanced Mapping +If your workflow is more complex, you can further refine what counts as a deployment by tweaking the integration mapping or by combining multiple mapping blocks. + +For full control, you can go directly to the mapping of your GitHub integration and manually adjust it. This allows you to add or modify relations to services and teams, ensuring each deployment is linked to the right context, or create more complex filters. 
With this setup, you can filter and break down your DORA metrics by team or service—making it easier to pinpoint issues, compare performance, and generate more relevant insights for your organization. + +### Next Steps +You can continue refining your integration as your workflows change, or check out guides for other tools to build a comprehensive view of your delivery process. + +With your GitHub integration set up, you now have clear visibility into your software delivery metrics, enabling data-driven improvements across your teams. diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/gitlab/_category_.json b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/gitlab/_category_.json new file mode 100644 index 0000000000..1001547e23 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/gitlab/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "GitLab integration", + "position": 2 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/gitlab/gitlab.md b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/gitlab/gitlab.md new file mode 100644 index 0000000000..ab5eba67b0 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/gitlab/gitlab.md @@ -0,0 +1,102 @@ +# GitLab Integration for DORA Metrics + +Port enables you to track DORA metrics from your GitLab repositories by directly configuring your data mapping—no self-service actions required. You have full flexibility to define exactly what counts as a “deployment” for your organization, whether it’s a merged merge request or a completed CI/CD job. + +### Prerequisites +- An installed GitLab integration. +- An installed DORA Metrics experience. +- Admin permissions in Port to modify integrations and mappings. + +### Configuring Deployments from GitLab + +With Port’s GitLab integration, you manually define the mapping logic to ingest the right deployment events into Port’s DORA framework. You can map merge requests, jobs, or both, depending on how you define a “deployment”. + +1. Map merge requests and/or jobs as deployments + - Go to the Data Sources page, then click on the GitLab integration. + - Add a new mapping to ingest GitLab merge requests as deployment entities. + + +:::info Technical note: how deployments are mapped +Keep in mind that the mapping ingests the deployments into the dora_deployment_event blueprint. For further information, visit Create & track DORA metrics in your portal. +::: + +```yaml showLineNumbers +- kind: merge-request + selector: + query: 'true' + port: + entity: + mappings: + identifier: .id | tostring + title: .title + blueprint: '"dora_deployment_event"' + properties: + deployment_type: '"MR merged"' + deployment_time: .merged_at + leadTimeHours: >- + (.created_at as $createdAt | .merged_at as $mergedAt | ($createdAt + | sub("\\..*Z$"; "Z") | strptime("%Y-%m-%dT%H:%M:%SZ") | mktime) + as $createdTimestamp | ($mergedAt | if . 
== null then null else + sub("\\..*Z$"; "Z") | strptime("%Y-%m-%dT%H:%M:%SZ") | mktime end) + as $mergedTimestamp | if $mergedTimestamp == null then null else + (((($mergedTimestamp - $createdTimestamp) / 3600) * 100 | floor) / + 100) end) +``` + +- Alternatively, map jobs as deployments: + +```yaml showLineNumbers + - kind: job + selector: + query: 'true' + port: + entity: + mappings: + identifier: .id | tostring + title: .title // (.id | tostring) + blueprint: '"dora_deployment_event"' + properties: + deployment_type: '"GitLab Job"' + deployment_time: .finished_at + status: .status + leadTimeHours: >- + (.commit.committed_date as $createdAt | .finished_at as $mergedAt | + ($createdAt | sub("\\.\\d+(Z|\\+00:00)$"; "Z") | + strptime("%Y-%m-%dT%H:%M:%SZ") | mktime) as $createdTimestamp | + ($mergedAt | if . == null then null else sub("\\.\\d+(Z|\\+00:00)$"; "Z") + | strptime("%Y-%m-%dT%H:%M:%SZ") | mktime end) as $mergedTimestamp | + if $mergedTimestamp == null then null else + (((($mergedTimestamp - $createdTimestamp) / 3600) * 100 | floor) / 100) end) + +``` +- Save the mapping. Port will begin ingesting matching events as deployments. + +2. (Optional) Filter or tag deployments, as described in the next section. + +### Filtering and Tagging + +You can further refine your configuration by applying filters or tags within your mapping: + +* **Example:** Only count deployments where the target branch is `main` and the state is `closed`: + + ```yaml + query: ([.state == "closed"] | any) and ([.target_branch == "main"] | any) + ``` + +This ensures you’re only tracking the deployments that matter most to your process. + +### Save and Test + +After saving your mapping, Port will begin ingesting the selected GitLab events as deployments. +**Test your configuration:** Trigger a new merge request and complete it, or run a deployment job. Then, check the relevant deployment blueprint in Port to verify the new deployment appears as expected. + +### Customization and Advanced Mapping + +For more advanced scenarios, you can combine multiple mappings or further customize properties to reflect your organization’s needs. You can also add or modify relationships to services and teams within your mapping to make your DORA dashboards even more insightful and actionable. + +### Next Steps + +As your workflows evolve, you can update your mapping or combine sources for a more complete view. For more integrations and advanced deployment mapping options, check out the additional guides in this section. + +With your GitLab integration configured, you now have accurate, actionable DORA metrics reflecting your team’s real delivery practices. 
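+### Sanity-checking the lead-time expression
+
+The folded jq expressions above are dense. As a sanity check only (this is not part of the mapping), here is a small Python equivalent of the merge-request lead-time calculation, assuming GitLab's ISO-8601 timestamps:
+
+```python
+from datetime import datetime
+
+def lead_time_hours(created_at: str, merged_at: str | None) -> float | None:
+    # Mirrors the jq logic: drop sub-second precision, parse as UTC,
+    # and return the difference in hours, floored to two decimal places.
+    if merged_at is None:
+        return None
+    fmt = "%Y-%m-%dT%H:%M:%SZ"
+    created = datetime.strptime(created_at[:19] + "Z", fmt)
+    merged = datetime.strptime(merged_at[:19] + "Z", fmt)
+    return int(((merged - created).total_seconds() / 3600) * 100) / 100
+
+# A merge request opened at midnight and merged 6.5 hours later:
+print(lead_time_hours("2025-01-01T00:00:00.000Z", "2025-01-01T06:30:00.000Z"))  # 6.5
+```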
+ diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/jira/_category_.json b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/jira/_category_.json new file mode 100644 index 0000000000..f135346c90 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/jira/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Jira integration", + "position": 3 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/jira/jira.md b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/jira/jira.md new file mode 100644 index 0000000000..c3ccec30a9 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/jira/jira.md @@ -0,0 +1,111 @@ +# Jira Integration for DORA Metrics + +Port allows you to track DORA metrics using Jira issues, providing flexibility to define exactly what counts as a "deployment" or an "incident" in your workflow. You directly configure data mapping, ensuring accuracy without needing additional self-service actions. + +### Prerequisites + +* An installed Jira integration. +* An installed DORA Metrics experience. +* Admin permissions in Port to modify integrations and mappings. + +## Configure your deployments and incidents + +With Port’s Jira integration, you manually define the mapping logic to ingest Jira issues into Port’s DORA framework. You can map Jira issues as deployments, incidents, or both, based on your organization's definitions. + +1. **Map Jira issues as deployments**: + + * Navigate to **Data Sources** and select your Jira integration. + * Add a new mapping to ingest Jira issues as deployment entities. + + +:::info Technical note: how deployments are mapped +Mappings ingest Jira issues into the `dora_deployment_event` blueprint. For more details, see [Create & track DORA metrics in your portal](/guides/all/create-and-track-dora-metrics-in-your-portal). +::: + +```yaml showLineNumbers +- kind: issue + selector: + query: 'true' + jql: (statusCategory != Done) OR (created >= -4w) OR (updated >= -4w) + port: + entity: + mappings: + identifier: .key + title: .fields.summary + blueprint: '"dora_deployment_event"' + properties: + deployment_type: '"Jira Issue"' + deployment_time: .fields.resolutiondate + status: >- + if (.fields.resolutiondate != null) then "success" else "pending" end + lead_time_hours: >- + if (.fields.resolutiondate != null and .fields.created != null) + then ((.fields.resolutiondate[0:19] + "Z" | fromdateiso8601) - + (.fields.created[0:19] + "Z" | fromdateiso8601)) / 3600 else null end +``` + +2. **Map Jira issues as incidents**: + +```yaml showLineNumbers +- kind: issue + selector: + query: 'true' + jql: ((statusCategory != Done) OR (created >= -4w) OR (updated >= -4w)) + port: + entity: + mappings: + identifier: .key + title: .fields.summary + blueprint: '"dora_incident_event"' + properties: + incident_type: '"Jira Issue"' + description: .fields.description.content | .. | objects | select(.type? 
== "text") | .text + incident_url: (.self | split("/") | .[0:3] | join("/")) + "/browse/" + .key + created_at: .fields.created + resolved_at: .fields.resolutiondate + time_to_resolve: >- + if (.fields.resolutiondate != null and .fields.created != null) + then ((.fields.resolutiondate[0:19] + "Z" | fromdateiso8601) - + (.fields.created[0:19] + "Z" | fromdateiso8601)) / 3600 else null end +``` + + +You can map Jira custom fields to track additional incident information. Here's an example of mapping custom fields for urgency, status, and priority: + +```yaml +urgency: .fields.customfield_00000 +status: .fields.customfield_00000 +priority: .fields.customfield_00000 +``` + + +* **Save the mapping**: Port will immediately start ingesting matching Jira issues as deployments or incidents. + +### Filtering and Tagging + +Refine your mappings further using Jira Query Language (JQL). This lets you precisely define which issues should count towards your DORA metrics. + +**Example:** Track only deployments with specific issue types and statuses: + +```yaml +jql: project = "APP" AND issuetype in ("Deployment") AND status = "Done" +``` + +### Save and Test + +After saving your mappings, Port begins ingesting Jira events immediately. + +**Test your configuration:** + +* Create or resolve a Jira issue matching your mapping criteria. +* In Port, navigate to the relevant deployment or incident blueprint to confirm the new entities appear correctly. + +### Customization and Advanced Mapping + +For more complex setups, combine multiple mappings or customize additional properties to better reflect your organization's requirements. You can add or modify relations to services and teams within your mappings, enhancing the depth of insights provided by your DORA dashboards. + +### Next Steps + +As your workflows develop, update your mappings or incorporate additional sources for more comprehensive insights. Explore further integration possibilities and advanced mapping techniques in the additional guides provided. + +With Jira integration properly configured, you'll have precise, actionable DORA metrics reflecting your organization's actual operational practices. diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/pagerduty/_category_.json b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/pagerduty/_category_.json new file mode 100644 index 0000000000..b5b25181c8 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/pagerduty/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "PagerDuty integration", + "position": 4 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/pagerduty/pagerduty.md b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/pagerduty/pagerduty.md new file mode 100644 index 0000000000..88df6a2e5b --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-dora-metrics/pagerduty/pagerduty.md @@ -0,0 +1,36 @@ +# PagerDuty Integration for DORA Metrics + +Port offers a straightforward, self-service approach to track DORA metrics from your PagerDuty incidents. You can quickly define what counts as an “incident” for your organization using out-of-the-box action in the portal, all directly in the portal UI without any manual configuration needed. + +### Prerequisites +- An installed PagerDuty integration. +- An installed DORA Metrics experience. 
+- Admin permissions in Port to modify integrations and mappings. + +### How Incident Tracking Works + +With Port’s PagerDuty integration, all incidents from your connected PagerDuty account are ingested automatically. There’s no need to define custom filters or mapping rules—Port will bring in every PagerDuty incident as part of your DORA metrics dashboard. + +### How to Configure +1. Go to the “2. Set up your incidents” configuration page in your Port portal. + +2. Locate the “Configure your incident method” section. + +3. Select the self-service action to enable PagerDuty incident tracking. + +4. Execute the action. + +That’s it—Port will now automatically ingest all PagerDuty incidents and reflect them in your DORA dashboards. + +### Customization and Advanced Mapping + +If your incident workflow is more nuanced, you can further refine what counts as an incident by tweaking the integration mapping directly or combining multiple mapping blocks. + +For full control, you can go directly to the mapping of your PagerDuty integration and manually adjust it. This allows you to add or modify relations to specific services and teams, or create more granular filters (e.g., by severity, owning team, or tags). With this setup, you can analyze your DORA metrics by team or service, making it easier to identify bottlenecks, compare incident response performance, and generate actionable insights. + +### Next Steps + +You can continue refining your incident integration as your workflows evolve, or explore guides for integrating other tools to build a holistic view of your software delivery process. + +With your PagerDuty integration set up, you now have instant visibility into how incidents impact your DORA metrics, empowering your teams to drive operational improvements. + diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-surveys/_category_.json b/docs/solutions/engineering360/advanced-configuration/customizing-surveys/_category_.json new file mode 100644 index 0000000000..c917691888 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-surveys/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Customizing Surveys", + "position": 3 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-surveys/customizing-surveys.md b/docs/solutions/engineering360/advanced-configuration/customizing-surveys/customizing-surveys.md new file mode 100644 index 0000000000..3802c5930e --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-surveys/customizing-surveys.md @@ -0,0 +1,3 @@ +# Customizing developer surveys + +Engineering360 offers extensive flexibility in customizing developer surveys to match your organizational needs. On this page, you'll discover how to adapt survey configurations effectively within your portal—whether you're designing new surveys, modifying existing templates, or integrating feedback collection seamlessly into your workflows. Explore the subpages below for detailed guidance on creating, customizing, and distributing surveys, and learn how to link survey insights directly to specific teams, services, or initiatives for meaningful improvements tailored precisely to your organization's goals. 
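+For example, once you have copied a survey's link from its action card, you can script its distribution through whatever channel your team already uses. Below is a minimal sketch using Slack's `slack_sdk` library; the bot token, channel name, and survey URL are placeholders for your own values, and the bot must already be invited to the target channel:
+
+```python
+# pip install slack_sdk
+import os
+from slack_sdk import WebClient
+
+# Placeholders: set SLACK_BOT_TOKEN and SURVEY_URL in your environment;
+# SURVEY_URL is the link copied from the survey's action card in Port.
+client = WebClient(token=os.environ["SLACK_BOT_TOKEN"])
+survey_url = os.environ["SURVEY_URL"]
+
+client.chat_postMessage(
+    channel="#developer-experience",  # hypothetical channel name
+    text=f"We'd love your feedback! Please take two minutes to answer our developer survey: {survey_url}"
+)
+```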
diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-surveys/how-to-customize-your-survey/_category_.json b/docs/solutions/engineering360/advanced-configuration/customizing-surveys/how-to-customize-your-survey/_category_.json new file mode 100644 index 0000000000..e9d131881d --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-surveys/how-to-customize-your-survey/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "How to customize your survey", + "position": 1 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/customizing-surveys/how-to-customize-your-survey/how-to-customize-your-survey.md b/docs/solutions/engineering360/advanced-configuration/customizing-surveys/how-to-customize-your-survey/how-to-customize-your-survey.md new file mode 100644 index 0000000000..cd216356df --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/customizing-surveys/how-to-customize-your-survey/how-to-customize-your-survey.md @@ -0,0 +1 @@ +how to custom \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/what-gets-installed/_category_.json b/docs/solutions/engineering360/advanced-configuration/what-gets-installed/_category_.json new file mode 100644 index 0000000000..2c703e1469 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/what-gets-installed/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "What gets installed", + "position": 1 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/advanced-configuration/what-gets-installed/what-gets-installed.md b/docs/solutions/engineering360/advanced-configuration/what-gets-installed/what-gets-installed.md new file mode 100644 index 0000000000..62dddc4ab3 --- /dev/null +++ b/docs/solutions/engineering360/advanced-configuration/what-gets-installed/what-gets-installed.md @@ -0,0 +1,66 @@ +# What gets installed + +When you set up Engineering360 in Port, you get a complete “starter kit” built from Port’s core building blocks: blueprints, dashboards, actions, and automations. Think of it as an API for engineering intelligence—out of the box, you can track DORA metrics and collect developer sentiment with ready-made components. This foundation is fully customizable, so you can extend, automate, and adapt every part to fit your unique engineering workflows. + +In this section, you’ll find guides to what gets installed, how to integrate and customize each building block, and ways to unlock even more value from your Engineering360 starter kit. + +### DORA metrics +This experience provides: +- A prebuilt setup for collecting DORA metrics. +- Flexible configuration of what counts as deployments and incidents. +- Automated data ingestion via integrations and APIs. +- A centralized dashboard for visibility and insights. + +When you install the DORA Metrics experience, Port automatically generates the following components to help you configure, ingest, and visualize your engineering performance data: + +#### Blueprints +`Deployment` and `Incident` — the main components used to calculate your DORA metrics. +#### Dashboard pages +`Set up your deployments` — A dashboard page to define what qualifies as a deployment in your organization. +`Set up your incidents` — A dashboard page to define what qualifies as an incident in your organization. +`DORA metrics` — A dashboard page that helps you track your organization's engineering performance over time. 
+#### Self-service actions +`Create a deployment` — an action that creates a deployment. +* By default, the dashboard page will contain multiple actions to create a deployment, one for each definition of a deployment. +`Create an incident` — an action that creates an incident. +* By default, the dashboard page will contain multiple actions to create an incident, one for each definition of an incident. +#### Integration mapping +When a user executes the self-service action to define deployments or incidents, the relevant integration mapping (according to the selected deployment/incident method) is updated with a new block. +This automates a manual step that would otherwise require editing the integration mapping directly. +:::note +Filters in the action use an AND operator. To achieve OR logic, run the action multiple times with different filter sets. +::: +#### Additional components +Port also creates supporting technical mechanisms to ensure stable and reliable data ingestion: +#### Blueprints — Used to avoid accidental data loss during resyncs. +For example, closed pull requests are deleted on resync by default to avoid ingesting historical data. +To preserve relevant data: +Closed PRs are first ingested into a hidden `_dora_deployment_event` blueprint. +An automation then upserts the data into the main `Deployment` blueprint. +This ensures only the hidden blueprint is affected by resync deletions. +#### Automations — Ensure reliable data flow from configuration to ingestion: +Your self-service actions define how deployments and incidents are tracked. +These definitions update integration mappings, which ingest data into hidden blueprints. +Automations then upsert that data into the main blueprints, protecting it from resync deletions. +### Surveys +When you set up a survey, Port automatically creates the following components to enable survey distribution and feedback collection: +#### Blueprints +These blueprints model the survey data and are only created the first time you install a survey: +`Survey Template` — Defines the structure of a survey that can be reused multiple times. +`Survey` — Represents each instance of a survey template. +`Question Template` — Defines reusable question formats like "text" or "selection". +`Question` — Contains the actual questions being asked in a particular survey. +`Response` — Stores individual survey responses submitted by users. + +#### Self-service action +A self-service action will be created for each survey instance. +This action allows developers to respond to the survey. + +You can control who can respond to the survey via the action's permissions. + +#### Dashboard page +Visualizes survey submissions and aggregates trends in developer sentiment. + +#### Additional resources +To capture responses, the experience also includes a webhook data source that ingests survey responses when submitted via the self-service action. 
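+Because survey responses land in regular blueprints, you can also work with them programmatically. The snippet below is an illustrative sketch (not an installed component) that lists entities of the `Response` blueprint via Port's API; the `survey_response` identifier is a placeholder, so replace it with the identifier shown in your portal's data model, and see Port's API reference for the exact endpoints:
+
+```python
+import requests
+
+# Consider using environment variables instead of the hardcoded values
+CLIENT_ID = "YOUR_CLIENT_ID"
+CLIENT_SECRET = "YOUR_CLIENT_SECRET"
+PORT_API_URL = "https://api.port.io"
+
+# Exchange client credentials for an access token
+auth_response = requests.post(
+    f"{PORT_API_URL}/v1/auth/access_token",
+    json={"clientId": CLIENT_ID, "clientSecret": CLIENT_SECRET}
+)
+auth_response.raise_for_status()
+access_token = auth_response.json().get("accessToken")
+
+# List survey response entities ("survey_response" is a placeholder identifier)
+res = requests.get(
+    f"{PORT_API_URL}/v1/blueprints/survey_response/entities",
+    headers={"Authorization": f"Bearer {access_token}"}
+)
+res.raise_for_status()
+for entity in res.json().get("entities", []):
+    print(entity["identifier"], entity.get("properties", {}))
+```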
+
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/_category_.json new file mode 100644 index 0000000000..62fb45676d --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "The State of Engineering Intelligence",
+ "position": 3
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/_category_.json new file mode 100644 index 0000000000..73e76f6a7c --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "Common pitfalls in engineering metrics",
+ "position": 1
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/common-pitfalls-in-metrics.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/common-pitfalls-in-metrics.md new file mode 100644 index 0000000000..4fea5ce8ec --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/common-pitfalls-in-metrics.md @@ -0,0 +1,27 @@
+# Common Pitfalls in Engineering Metrics
+
+Collecting engineering metrics is easy; using them correctly is hard. Many organizations fall victim to **misused or “vanity” metrics** that create a false sense of progress. A classic example is measuring **lines of code written** – this number can grow while actual productivity or code quality declines. In fact, more code can signal inefficiency, and focusing on LOC incentivizes developers to write unnecessary code. Similarly, **commit frequency** or **pull request counts** might seem like useful measures of output, but taken alone they tell little about real value delivered. Developers can game these metrics (for instance, by making many trivial commits) without improving the product. As one engineering blog summarized, these kinds of metrics all share a flaw: they measure output and pretend it equals productivity – which simply “doesn't work” in creative endeavors like software.
+
+
+Another pitfall, often encountered in metrics programs, is Goodhart’s Law, which warns that “when a measure becomes a target, it ceases to be a good measure.” Teams that fixate on hitting a number may **optimize for the metric at the expense of actual outcomes**. For example, if leadership mandates an increase in deployment frequency without regard to quality, teams might push more releases but introduce more bugs – hitting the frequency target while harming stability. We’ve seen cases where story point velocities are boosted (developers inflate estimates to look productive) or test counts go up (by writing trivial tests) just to satisfy a dashboard. **The wrong focus leads to negative consequences**, as teams chase numbers instead of true business impact.
+
+
+Over-measuring and over-analysis pose further risks. **Too many metrics** can confuse priorities – teams drown in data but can’t see which issue to tackle first. Metrics are also easily **misinterpreted** without context. A team with a low change failure rate, for instance, might look “good” until you realize they deploy so infrequently that they’re not testing anything.
**Aggregated scores** (like single composite metrics) are especially suspect, as they obscure underlying factors and can be misleading.
+
+
+**Real-world examples of metrics gone wrong abound**. One cited instance is when a company rewarded teams purely on ticket closure rates – only to find issues being split into many trivial tickets to boost counts, while actual customer satisfaction dropped. In another case, a dev team set a goal to reduce mean time to recovery (MTTR) for incidents; they met it by reclassifying severe outages as lower-priority incidents (so they weren’t counted in MTTR), thus “improving” the metric on paper while users still suffered downtime.
+
+
+**How can teams avoid these pitfalls?** Port recommends a few guiding practices:
+
+- **Focus on Actionable Metrics**: Every metric tracked should drive a decision or change. If a number won’t alter any behavior, it’s likely not worth obsessing over. Metrics must drive decisions, not exist for their own sake.
+
+- **Balance Quantitative with Qualitative**: To get the full picture, combine the numbers with developer feedback. This prevents blind spots – for example, pairing deployment stats with a survey on developer morale can reveal if faster releases are burning out the team.
+
+- **Provide Context**: Always interpret metrics in context. Instead of viewing any single number in isolation, look at trends and related metrics (e.g. a dip in deployment frequency alongside an uptick in incidents). This holistic approach guards against tunnel vision.
+
+- **Avoid Vanity and Composite Scores**: Favor metrics that reflect true outcomes (customer value, reliability) over those that are easy to game. If you use composite indices, regularly revisit their components to ensure they remain relevant.
+
+- **Continuous Review and Adaptation**: Metrics programs aren’t “set and forget.” Periodically re-evaluate what you measure. As your teams and product evolve, ensure your metrics still align with your goals and aren’t incentivizing the wrong behaviors.
+
+By sidestepping these common pitfalls, engineering organizations can ensure their metrics programs genuinely support improvement. The goal is to shine a light on reality – even when it’s uncomfortable – and then take action, rather than to paint a rosy picture with meaningless numbers.
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/designing-actionable-surveys/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/designing-actionable-surveys/_category_.json new file mode 100644 index 0000000000..9f9054b966 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/designing-actionable-surveys/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "Designing actionable surveys",
+ "position": 5
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/designing-actionable-surveys/designing-actionable-surveys.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/designing-actionable-surveys/designing-actionable-surveys.md new file mode 100644 index 0000000000..746ea6074e --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/designing-actionable-surveys/designing-actionable-surveys.md @@ -0,0 +1,26 @@
+# Designing Actionable Surveys
+
+Conducting surveys is only half the battle – the other half is designing them in a way that yields useful, actionable insights.
An “actionable” survey means that its questions and structure lead to clear understanding and can inform decisions or changes. Here are best practices and principles for designing surveys that genuinely drive action in the engineering context: + +### 1. Start With the End in Mind +Before drafting any question, ask: “*What would I do if I got a certain response?*” For a survey to be actionable, you should have an idea of the potential actions each question could prompt. If you find a question is “nice to know” but you can’t envision acting on any of its possible answers, consider cutting it. For example, asking “*Which programming language do you prefer?*” might be interesting, but unless you plan to change something based on that (like offer training in a popular language, or consider standardizing languages), it might not be actionable. On the other hand, “*How would you rate the ease of our release process?*” is directly tied to something you can improve if ratings are low (e.g., simplify the process, add automation). **Each survey item should connect to a potential improvement area**. + + +### 2. Use Clear and Specific Questions +Vague or compound questions produce muddled answers. Design questions that target a single aspect clearly. Instead of “Are tools and processes adequate?”, break it down: “Do you have the tools you need to do your job effectively?” and separately “Do our current development processes (e.g. code review, CI) support rapid delivery?”. This way, if you get a negative response, you know whether to look at tools or processes specifically. Avoid double-barreled questions (e.g., “Do you find the onboarding and training helpful?” – a person might find onboarding good but training poor; you won’t know how to act). If needed, add context or examples in the question to ensure everyone interprets it similarly (e.g., “Rate your satisfaction with code reviews (e.g., feedback quality, speed of reviews)”). Clarity leads to actionable specificity – you’ll know exactly what area the feedback is about. + + +### 3. Keep It Concise +Lengthy surveys with dozens of questions can overwhelm respondents, leading to drop-offs or superficial answers, which in turn reduces actionability. It’s better to have a short survey with high-quality data than a long one full of half-hearted answers. Prioritize questions: identify the “must-haves” vs “nice-to-haves.” In an engineering context, developers appreciate conciseness. Aim for a survey that can be completed in a reasonable time (e.g., 5-10 minutes for a quarterly survey, 1-2 minutes for a pulse). A common technique is to have a core set of questions that remain consistent (to track trends) and a few rotating questions on timely topics (to gather insight on new issues). This keeps surveys focused. Shorter surveys also directly combat fatigue – people are more likely to give thoughtful feedback if they see only a handful of questions. + + +### 4. Balance Closed and Open-Ended Questions +Closed questions (yes/no, multiple choice, rating scales) are easy to quantify and compare. Open-ended questions (free text) provide richness and context that numbers alone can’t. An actionable survey often uses a mix: closed questions to measure and detect issues, and a few open-ended prompts to capture suggestions or deeper explanations. For example, you might ask “Rate your satisfaction with our documentation (1-5)”, and follow with an open question: “What’s one thing we could do to improve documentation for you?”. 
The rating tells you if there’s a problem, and the comment tells you what action might help (a small sketch of this pairing appears at the end of this page). Be careful not to make the survey too open-ended – developers are busy, and writing long responses takes time. Use open questions sparingly for key areas. Also, when analyzing, treat open responses seriously: categorize them, look for common themes, and highlight representative quotes when presenting findings. Often the qualitative insights are what really drive action, because they tell stories that pure numbers cannot.
+
+
+### 5. Plan for Results Communication and Follow-up
+When designing, think about how results will be reported and what follow-up discussions might look like. If you create a survey full of highly technical questions that only the platform team will understand, how will you communicate those results to a broader audience or leadership? It might be fine if that’s your target, but often you’ll share a summary with the whole engineering org. So, phrase questions in a way that you’d be comfortable having the aggregated responses up on a slide. Also, design questions to facilitate year-over-year or iteration-over-iteration comparison. If you intend to run the survey again, keeping some questions consistent allows trending (e.g., “Developer satisfaction with dev environment: 3.5 this quarter vs 3.0 last quarter”). Trendable metrics are actionable in that they show if changes you made had an effect.
+
+
+### Turning developer feedback into real action
+By following these guidelines, you’ll craft surveys that yield clear, meaningful insights. An actionable survey yields responses like “X% of devs are dissatisfied with Y” or “Top frustration is Z”, which you can directly feed into your engineering roadmap or retrospectives. It moves the conversation from anecdotal “I feel” to data-driven “The team has spoken, and we need to do something about it.” In Port’s Engineering360, we’ve baked in these best practices. Remember, a well-designed survey is not just a diagnostic tool but a catalyst for improvement.
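To tie sections 2-4 together, here is a small sketch of how a paired closed-plus-open question might be modeled. The shape is a hypothetical illustration for thinking about survey structure, not the schema Engineering360 uses internally.

```typescript
// Hypothetical sketch: a closed rating question paired with an open follow-up.
// The Question type and field names are illustrative assumptions.
type Question =
  | { id: string; kind: "rating"; text: string; min: number; max: number }
  | { id: string; kind: "open"; text: string };

const documentationQuestions: Question[] = [
  {
    id: "docs-satisfaction",
    kind: "rating",
    text: "Rate your satisfaction with our documentation",
    min: 1,
    max: 5,
  },
  {
    id: "docs-improvement",
    kind: "open",
    text: "What's one thing we could do to improve documentation for you?",
  },
];

console.log(documentationQuestions.map((q) => q.kind)); // ["rating", "open"]
```

The rating detects the problem and trends over time; the open follow-up suggests the action.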
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/_category_.json new file mode 100644 index 0000000000..93e49169f7 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "DORA Metrics explained",
+ "position": 4
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/balancing-the-metrics/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/balancing-the-metrics/_category_.json new file mode 100644 index 0000000000..3ea17b434a --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/balancing-the-metrics/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "Balancing the metrics",
+ "position": 5
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/balancing-the-metrics/balancing-the-metrics.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/balancing-the-metrics/balancing-the-metrics.md new file mode 100644 index 0000000000..33c348e8a0 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/balancing-the-metrics/balancing-the-metrics.md @@ -0,0 +1,19 @@
+# Balancing the Metrics
+
+A critical concept with DORA metrics is that optimizing one should not mean sacrificing another. They are meant to be achieved in harmony. For example, simply cranking up Deployment Frequency without care can hurt CFR (more rushed changes = more failures). Likewise, obsessing over zero failures could slow you down (if teams become overly cautious or bureaucratic). The goal is to improve both velocity and stability together, which is a mark of true DevOps high performance. It’s better to deploy 100 times with 5 quick-to-recover failures than to deploy 5 times with 1 failure that takes ages to fix. So watch these metrics as a set. Port’s dashboards allow you to see them side by side, and even overlay trends – for instance, see how a change in lead time correlates with change failure rate.
+
+### Using DORA Metrics Effectively
+When implemented well, DORA metrics can provide enhanced visibility and standardization for your engineering org. They give teams a common language (e.g., “We improved our lead time by 20%!” or “Our CFR is above industry benchmark, let’s address that.”). To avoid the pitfalls:
+
+- **Use ranges/benchmarks wisely**: Google’s research provides benchmarks for Elite, High, Medium, Low performers for each metric. These are useful for goal-setting, but consider your context. Use them as inspiration, not strict grades.
+
+- **Contextualize each metric**: Always discuss why a metric is at its level. The numbers should prompt questions and investigation. Port’s advantage is you can click through from a metric to related data (like from a high MTTR to see the actual incidents, their details, and even survey feedback about those incidents).
+
+- **Drive conversations and actions**: Share DORA metrics with teams regularly (e.g., weekly or sprint reviews).
When a metric moves in the wrong direction, treat it as a team problem to solve, not an occasion for individual blame. Perhaps set up a guild or task force to tackle cross-cutting issues affecting metrics.
+
+- **Celebrate improvements**: When these metrics improve, recognize it! If you bring CFR down or lead time down, it usually means a lot of incremental improvements behind the scenes. Celebrating wins encourages teams to keep investing in DevOps practices.
+
+- **Expand beyond DORA when ready**: DORA is a great starting point, but as Port often advises, once you’ve established a baseline, you may explore additional metrics. For example, pull request cycle time (mentioned earlier) or code review turnaround are not DORA metrics but are extremely useful. We’ll cover custom metrics on the next page.
+
+
+In summary, DORA metrics are popular for good reason: they are simple, outcome-focused, and empirically linked to high performance. By understanding their nuances and using them as part of a balanced scorecard, you can leverage them to significantly improve your engineering processes. Port’s Engineering360 natively supports DORA metric tracking and, importantly, helps you tie those metrics to actions – ensuring that a dip in a DORA metric triggers the right response, and an improvement is capitalized on to push even further.
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/change-failure-rate/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/change-failure-rate/_category_.json new file mode 100644 index 0000000000..0cb0c1028f --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/change-failure-rate/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "Change failure rate",
+ "position": 4
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/change-failure-rate/change-failure-rate.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/change-failure-rate/change-failure-rate.md new file mode 100644 index 0000000000..f82f85e0be --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/change-failure-rate/change-failure-rate.md @@ -0,0 +1,17 @@
+# Change Failure Rate (CFR)
+
+**Definition**: The percentage of deployments to production that result in a failure requiring remediation. “Failure” typically means a deployment that caused an incident, outage, rollback, or severe bug that necessitated an immediate fix. For example, if 100 deployments in a period resulted in 5 rollbacks or hotfixes, CFR is 5%. Lower is better.
+
+**What it tells you**: CFR is a measure of quality and stability in releases. It indicates the rigor of testing and validation and the effectiveness of your release processes. A low change failure rate means most releases are successful and stable, which is a hallmark of mature DevOps teams. DORA research found elite performers have CFR in the low single digits or even zero in many cases.
+
+**What it misses or risks**: CFR depends on defining what counts as a "failure". Teams might underreport failures (consciously or not) to look good. If incidents are not tracked, or if a problem is discovered much later, it might not be attributed to a specific deployment, masking the issue.
Also, if you deploy infrequently, one failure among a few deploys can make CFR look high percentage-wise, even if the absolute number of failures isn’t big. Conversely, deploying very often might make CFR look tiny percentage-wise but still result in many failures in absolute terms. CFR also doesn’t capture severity – one could argue a single deployment that caused a massive outage is worse than three that caused minor glitches, but CFR counts them the same.
+
+**Common misinterpretations**: One might think “zero failures” is always the goal – but beware of Goodhart’s Law here: a team could achieve zero CFR by deploying far less often or by classifying all issues as non-deployment-related. It’s the classic testing paradox: if you never release, you never fail. So context is key. Also, a slightly higher CFR might be acceptable if the organization values pushing boundaries and is able to recover quickly (fast MTTR). In other words, CFR shouldn’t be looked at in isolation – consider it with MTTR. High CFR + high MTTR is dangerous (lots of failures, slow to fix). High CFR + very low MTTR might indicate a fast-moving, experimental culture (though ideally, we aim for low CFR anyway).
+
+**Improvement strategies**:
+- Improve testing and CI quality gates: Ensure robust automated tests (unit, integration, end-to-end) run before deploys. Possibly add canary releases or feature flag rollouts to catch issues on a subset of users first.
+- Peer review and static analysis: Strengthen code review practices, add linting, static code analysis, security scans to catch defects before deploy.
+- Observability in production: This doesn’t directly lower CFR but helps identify failures more quickly. Good monitoring and logging mean if a deployment has an issue, you detect it and trigger rollback faster (which might still count as a failure, but reduces impact).
+- Post-deployment validation: Automate smoke tests right after deployment in production to verify key functionalities. This can catch failures early and possibly automate rollback if needed.
+- Analyze patterns: Look at past failed changes – were they due to certain kinds of changes or certain subsystems? Feed that info back into planning and risk assessment. For example, if deployments on Friday have a higher failure rate, maybe instill a practice of no-Friday-deploys or double testing on Friday.
+- Culture and Process: Encourage a blameless post-mortem culture to learn from failures. Sometimes CFR is high because systemic issues (like environment drift or lack of integration tests) cause repeated failures. Address the root causes identified in retrospectives.
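To make the arithmetic concrete, here is a minimal sketch of the CFR calculation. The deployment record shape is an assumption for illustration; in practice the data would come from your deployment entities.

```typescript
// Minimal sketch: CFR = failed deployments / total deployments, as a percentage.
// The Deployment shape below is an illustrative assumption.
interface Deployment {
  id: string;
  failed: boolean; // required a rollback or hotfix, or caused an incident
}

function changeFailureRate(deployments: Deployment[]): number {
  if (deployments.length === 0) return 0;
  const failures = deployments.filter((d) => d.failed).length;
  return (failures / deployments.length) * 100;
}

// 100 deployments with 5 failures, matching the 5% example in the definition above.
const sample: Deployment[] = Array.from({ length: 100 }, (_, i) => ({
  id: `deploy-${i + 1}`,
  failed: i < 5,
}));
console.log(changeFailureRate(sample)); // 5
```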
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/deployment-frequency/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/deployment-frequency/_category_.json new file mode 100644 index 0000000000..3f0472511a --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/deployment-frequency/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "Deployment Frequency",
+ "position": 1
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/deployment-frequency/deployment-frequency.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/deployment-frequency/deployment-frequency.md new file mode 100644 index 0000000000..81f53b80a1 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/deployment-frequency/deployment-frequency.md @@ -0,0 +1,15 @@
+# Deployment Frequency
+
+**Definition**: How often an organization successfully deploys code to production or releases to end-users. Typically measured as deployments per day, week, or month. Higher frequency means smaller, more frequent releases.
+
+**What it tells you**: Deployment Frequency is a proxy for how quickly value is flowing to customers. Elite performers might deploy on-demand or multiple times a day, indicating a very efficient CI/CD pipeline and culture of continuous delivery. A high DF often correlates with smaller batch sizes, which reduces risk and makes troubleshooting easier (because changesets are small).
+
+**What it misses or risks**: DF alone doesn’t account for stability or quality. It’s possible to deploy frequently but with many failures or bugs. Teams could also game DF by deploying trivial changes or toggling features on/off just to count a “deployment.” If measured without a clear definition, teams might count different things (merges to main vs. actual production deploys). Over-focusing on DF can lead to neglecting quality – e.g. pushing unfinished code just to increase the count.
+
+**Common misinterpretations**: Low deployment frequency isn’t always bad – it may be a deliberate choice for certain industries (e.g., aerospace or medical software might deploy less due to heavy validation needs). It’s important to compare DF in context: if your releases are monthly because of business cadence, that might be fine, but if it’s monthly because your process is painfully manual, that’s an issue. Also, an uptick in DF accompanied by rising incident rates could signal rushing.
+
+**Improvement strategies**:
+- Trunk-based development & small batches: Encourage small, frequent merges to main and use feature flags. This way, code is always in a deployable state.
+- Address bottlenecks: If analysis shows code sits waiting for review or testing, invest in those areas (e.g., add reviewers or speed up tests).
+- Infrastructure as Code & Immutable infra: These practices make deployments more reliable, which in turn gives confidence to deploy more often.
+- Set a goal and measure trend: For example, “increase DF from bi-weekly to weekly within next quarter” and track progress.
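As a minimal sketch of how this metric can be computed from raw deploy timestamps (the input shape is an assumption, not Port's implementation):

```typescript
// Sketch: average production deployments per week over a trailing window.
function deploymentsPerWeek(deployTimes: Date[], windowDays = 28): number {
  const cutoff = Date.now() - windowDays * 24 * 60 * 60 * 1000;
  const recent = deployTimes.filter((t) => t.getTime() >= cutoff).length;
  return recent / (windowDays / 7);
}

// Example: 12 deploys spread over the last 24 days => 3 per week on a 28-day window.
const deploys = Array.from(
  { length: 12 },
  (_, i) => new Date(Date.now() - i * 2 * 24 * 60 * 60 * 1000)
);
console.log(deploymentsPerWeek(deploys)); // 3
```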
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/dora-metrics-explained.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/dora-metrics-explained.md new file mode 100644 index 0000000000..7cdf427ac1 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/dora-metrics-explained.md @@ -0,0 +1,4 @@
+# DORA Metrics explained
+
+The DORA metrics are a set of four key metrics identified by the DevOps Research and Assessment (DORA) team. These metrics have become an industry standard for measuring software delivery performance.
+They are often grouped into two categories – velocity and stability – covering the speed of delivery and the quality of outcomes. High-performing teams aim to excel at both simultaneously. Let’s break down each DORA metric: what it tells you (and what it doesn’t), common pitfalls or misinterpretations, and strategies for improvement:
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/lead-time-for-change/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/lead-time-for-change/_category_.json new file mode 100644 index 0000000000..64c579f8e5 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/lead-time-for-change/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "Lead time for change",
+ "position": 2
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/lead-time-for-change/lead-time-for-change.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/lead-time-for-change/lead-time-for-change.md new file mode 100644 index 0000000000..58e9d2ccdb --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/lead-time-for-change/lead-time-for-change.md @@ -0,0 +1,16 @@
+
+# Lead Time for Changes
+**Definition**: The time it takes from code being committed (or a work item being started) to that change being deployed in production. Essentially, how long code changes sit in the system before reaching users. Sometimes measured as commit-to-deploy or pull request merge to deploy.
+
+**What it tells you**: Lead Time is a direct measure of cycle time efficiency. Short lead times mean features/bug fixes move quickly through the pipeline, indicating smooth handoffs between dev, test, deploy stages. It reflects process health: code integration, testing, and release processes. Shorter LT usually implies more agile teams and can correlate with faster feedback loops.
+
+**What it misses or risks**: A very short lead time could mean your changes are small and your pipeline is automated – good – or it could mean you’re skipping steps (bad). Also, lead time doesn’t capture planning or code writing time; a team might spend weeks designing and coding, but if they merge and deploy in a day, lead time looks short. So it’s not the whole “idea to prod” time, just code integration to prod. Comparing lead times between teams can be tricky if definitions differ (e.g., one measures from first commit, another from PR creation).
+
+**Common misinterpretations**: Don’t confuse lead time with “time to value” for end-users.
It’s possible to have a feature toggle in code that’s deployed quickly (short LT) but the business doesn’t release it to users until a marketing event, which is outside engineering’s scope. Also, a long lead time might be by design for bundling releases (though that’s less common nowadays). If mis-measured, teams might think they’re doing fine when they deploy quickly once code is merged – perhaps ignoring a huge backlog before the merge. It’s crucial to define the start point of lead time consistently.
+
+**Improvement strategies**:
+- Streamline code review and approval: If waiting for approvals is a big chunk, establish an SLA for reviews or use pair programming to reduce review overhead.
+- Automated testing: Ensure your test suite (unit/integration) is robust and fast. Long test cycles or manual QA can balloon lead time. Invest in parallelizing tests or using cloud test services.
+- Continuous Integration practices: Commit small, commit often. This reduces the merge pain and integration bugs that can stall releases.
+- Value Stream Mapping: Map out every step from code commit to deploy (CI steps, QA, staging, etc.) and look for waste. Perhaps there’s an unnecessary manual staging sign-off that can be automated or removed.
+- Improve build times: Sometimes long lead time is simply due to slow build or artifact creation processes. Build caching or more build agents can help.
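A minimal sketch of the commit-to-deploy calculation follows, assuming you can pair each change's commit time with its production deploy time (the record shape is illustrative):

```typescript
// Sketch: median lead time for changes, measured commit-to-deploy in hours.
// The Change shape is an illustrative assumption.
interface Change {
  committedAt: Date;
  deployedAt: Date;
}

function medianLeadTimeHours(changes: Change[]): number {
  if (changes.length === 0) return 0;
  const hours = changes
    .map((c) => (c.deployedAt.getTime() - c.committedAt.getTime()) / 3_600_000)
    .sort((a, b) => a - b);
  const mid = Math.floor(hours.length / 2);
  return hours.length % 2 ? hours[mid] : (hours[mid - 1] + hours[mid]) / 2;
}
```

The median is often preferred over the mean here, since one stuck change can otherwise dominate the average.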
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/mttr/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/mttr/_category_.json new file mode 100644 index 0000000000..46e47a190e --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/mttr/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "Mean time to recovery",
+ "position": 3
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/mttr/mttr.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/mttr/mttr.md new file mode 100644 index 0000000000..db31a61659 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/dora-metrics-explained/mttr/mttr.md @@ -0,0 +1,17 @@
+# Mean Time to Recovery (MTTR)
+
+**Definition**: When a production incident or failure occurs, MTTR measures how long on average it takes to restore service. Essentially, from the start of an outage (or degradation) to full recovery. Often measured in hours or minutes. Shorter MTTR means you can bounce back quickly.
+
+**What it tells you**: MTTR is a key operational resilience metric. It indicates the effectiveness of your incident response process – monitoring, alerting, on-call efficiency, and the simplicity or redundancy of your systems. A low MTTR means even if failures happen (and some will, inevitably), the impact on users is minimized by swift recovery. It reflects well on DevOps practices like automation in incident management and good disaster recovery planning.
+
+**What it misses or risks**: MTTR can be tricky to measure consistently – what if an incident is partially resolved, or temporarily fixed and then recurs? Do you count time to temporary mitigation or full resolution? Organizations define this differently. Also, MTTR doesn’t reflect the frequency of incidents; a team might have few incidents but each takes a day to resolve vs. another with many small incidents resolved in minutes. MTTR by itself doesn’t tell you how reliable the system is, just how quickly you fix issues. Additionally, similar to CFR, teams could game MTTR by focusing on quick fixes that might not be thorough (thus perhaps causing new issues later), or by redefining "recovery" loosely.
+
+**Common misinterpretations**: Sometimes MTTR is taken as a personal metric (“how fast individual X resolves issues”). It should really be about the system and team process, not individuals. Comparing MTTR across teams can be unfair if their incident types differ widely (e.g., a database incident vs. a minor UI bug; the latter “recovers” faster by nature). Also, extremely low MTTR might imply you’re really good at firefighting – which is great, but it might also hide that you’re having to firefight a lot. Always pair MTTR with CFR and incident frequency to get a full picture of stability.
+
+**Improvement strategies**:
+- Strong monitoring and alerts: The faster you detect an issue, the faster you can resolve it. Invest in APM tools, uptime monitors, error alerting. Reduce mean time to detect, which is a subset of MTTR.
+- On-call practices: Ensure a responsive on-call rotation. This includes proper training, accessible runbooks, and perhaps lightweight on-call tooling (like one-click access to dashboards or predefined queries).
+- Automated remediation: Where possible, automate recovery steps. For example, if a server goes down, auto-trigger a replacement (auto-scaling or container restarts). If a known issue occurs, script the fix (like clearing a queue or restarting service X).
+- Runbooks and Knowledge Base: Document common failure scenarios with step-by-step guides. In the heat of an incident, having a playbook saves precious time. New engineers on call especially benefit from this.
+- Chaos Engineering: It might sound counterintuitive, but intentionally injecting failures in non-prod (or, carefully, even prod) can train the team and harden systems to recover quickly. By practicing failure scenarios (like disaster recovery drills, game days), you reduce panic and bring your MTTR down.
+- Post-incident reviews: After each major incident, do a retro. Did we take too long to find the root cause? Why? Maybe logs were missing or alarms didn’t trigger. Fix those process issues so next time recovery is quicker. For instance, if access to a production environment took 30 minutes to grant during an incident, that’s actionable (for example, automate it, as Port suggests, with just-in-time access for on-call engineers).
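For concreteness, here is a small sketch of the MTTR calculation over resolved incidents (the incident shape is an assumption; real data would come from your incident tool):

```typescript
// Sketch: mean time to recovery in minutes, averaged over resolved incidents.
// The Incident shape is an illustrative assumption.
interface Incident {
  startedAt: Date;
  recoveredAt: Date;
}

function mttrMinutes(incidents: Incident[]): number {
  if (incidents.length === 0) return 0;
  const totalMs = incidents.reduce(
    (sum, i) => sum + (i.recoveredAt.getTime() - i.startedAt.getTime()),
    0
  );
  return totalMs / incidents.length / 60_000;
}
```

Note how the result depends entirely on how `startedAt` and `recoveredAt` are defined, which is exactly the consistency concern raised above.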
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/_category_.json new file mode 100644 index 0000000000..ba20ca7480 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "Engineering metrics: what matters",
+ "position": 3
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/engineering-metrics-what-matters.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/engineering-metrics-what-matters.md new file mode 100644 index 0000000000..888c8324c2 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/engineering-metrics-what-matters.md @@ -0,0 +1,33 @@
+# Engineering Metrics: What Matters
+
+Not all metrics are created equal. In the realm of engineering metrics, **what you measure should align with what truly matters to your organization’s success**. This page provides an overview of the types of metrics that engineering leaders find most valuable, and how to ensure your metrics program stays focused on impact and business alignment.
+
+**Frameworks and Categories**: Over the years, several frameworks have emerged to categorize engineering metrics. One widely adopted set is the **DORA metrics**, which focus on software delivery performance (more on DORA in the subpage). But engineering work is multifaceted, and leaders often track metrics across multiple categories:
+
+
+- Velocity and Throughput Metrics: How fast are we delivering? This includes DORA’s Deployment Frequency and Lead Time for Changes, as well as related measures like cycle time (time from work start to release) or story completion rate. These metrics fall under the umbrella of process or productivity metrics – they gauge the efficiency of development workflows.
+
+
+- Quality and Stability Metrics: Are we delivering reliably? DORA’s Change Failure Rate and MTTR (Mean Time to Recovery) live here, alongside things like test coverage and incident counts. These ensure speed isn’t coming at the expense of stability. They align with product metrics in the sense of product quality, and with operational metrics for reliability.
+
+
+- People and Satisfaction Metrics: How are our engineers doing? These might include developer satisfaction scores, eNPS (employee Net Promoter Score) for engineers, onboarding duration for new hires, and attrition rates. Often considered people or team health metrics, they reveal morale, engagement, and team sustainability.
+
+
+- Project and Delivery Metrics: Are we on track and using resources wisely? Here you have things like predictability (planned vs. done work), burndown charts, sprint velocity (when used carefully), and even cost metrics (like cloud spend per feature or cost of delay). These intersect with project management and can tie engineering output back to business outcomes (like features delivered per quarter, etc.).
+ + +- Compliance and Standardization Metrics: Are we following best practices and standards? For example, percentage of services with a documented owner, coverage of monitoring on services, security scan compliance. These could be considered standards-based metrics, ensuring engineering work meets certain governance criteria. + +It’s easy to be overwhelmed by metrics. The key is to recognize that metrics are a means to an end, not an end in themselves. So, what end do we care about? +Generally: improving engineering outcomes that matter to the business. For most organizations, that means faster delivery of value, higher quality and stability, happier teams, and ultimately satisfied customers. + +**Business Alignment**: A good litmus test for any metric is to ask, “If this metric changes, does someone outside of engineering care?” Take Deployment Frequency – an exec might care because faster deployments (when done right) mean faster time to market, which can drive revenue or competitive advantage. Developer satisfaction – a CEO might care indirectly, since unhappy developers lead to turnover and slowed innovation. If you struggle to find a narrative for why a metric matters beyond the engineering department, it might be a vanity metric. It’s often helpful to tie engineering metrics to broader KPIs or OKRs. For example, if a company OKR is to improve customer retention, engineering might support that with metrics on incident response times (to improve uptime) and feature flow (to deliver promised enhancements on schedule), both of which clearly connect to customer experience. In Port, you can even map scorecards to business objectives, making this linkage explicit. + +**Focus on Key Metrics**: Especially at the start, it’s wise to focus on a small, balanced set of metrics that cover multiple dimensions. A classic recommendation is to implement DORA metrics for a baseline view of speed and stability – they’re research-backed and give you a clear initial benchmark. Then add one or two metrics around developer sentiment or team health (to gauge how sustainable your pace is). Finally, include a metric or two related to any critical engineering initiative or pain point unique to your context – for instance, if technical debt is a big issue, maybe track refactor efforts or code churn; if hiring/onboarding is a focus, track onboarding ramp time. By limiting the set, you ensure clarity. Each metric should tell a story and provoke a conversation about improvement, rather than just being a number on a slide. + +**Continuous Improvement over Targets**: It’s worth reinforcing that engineering metrics are most powerful when used for continuous improvement, not fixed targets. Setting reasonable goals or working agreements (e.g., “We strive to keep MTTR under 1 hour” or “PR review turnaround within 24 hours”) can motivate and align teams. But the real point is to use metrics to identify trends and outliers, discuss them in retrospectives or ops reviews, and experiment with changes. Metrics should feed a learning process. For example, if deployment frequency is lower than desired, dig into why – is it due to flaky tests, or maybe batching of work? Then try a solution (like more parallelization or smaller batch sizes) and see if the metric responds. This iterative approach keeps the focus on improvement rather than judgment. 
**Leveraging Port for Metrics that Matter**: Finally, Port’s Engineering360 is designed to help focus on what matters by enabling custom dashboards and scorecards for the metrics you choose, not just a predetermined set. You might use our templates for DORA, but you’ll also define what your organization cares about. By having all relevant metrics in your portal, you can see the big picture: how process metrics (like DORA) intersect with people metrics (like survey results) and standards metrics (like compliance percentages).
+
+In short, measure what matters, and make what you measure matter. By focusing on meaningful engineering metrics, aligned to business outcomes and team well-being, you set the stage for impactful improvements.
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/ports-philosophy-beyond-numbers/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/ports-philosophy-beyond-numbers/_category_.json new file mode 100644 index 0000000000..abb9bbb0a6 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/ports-philosophy-beyond-numbers/_category_.json @@ -0,0 +1,4 @@
+{
+ "label": "Port's philosophy beyond numbers",
+ "position": 2
+}
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/ports-philosophy-beyond-numbers/ports-philosophy-beyond-numbers.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/ports-philosophy-beyond-numbers/ports-philosophy-beyond-numbers.md new file mode 100644 index 0000000000..27267c213a --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/ports-philosophy-beyond-numbers/ports-philosophy-beyond-numbers.md @@ -0,0 +1,16 @@
+# Port’s Philosophy: Beyond Numbers
+
+**Port’s philosophy on engineering metrics is simple**: metrics are only as valuable as the actions they spark. In our view, collecting data and generating reports is the starting point, not the end goal. Too often, organizations treat metrics as a passive scoreboard – Port believes metrics should be a **springboard for change**. If a dashboard insight doesn’t lead to a discussion, an experiment, or an intervention, its value is effectively zero.
+
+
+This opinionated stance comes from experience. There’s no shortage of tools that produce graphs and charts about engineering performance. But teams frequently find themselves asking, “We have the data—now what?” Port’s answer is to bake the “now what” directly into the platform. **Engineering metrics must lead to concrete next steps**, and ideally, those steps can be initiated right where you see the insight. For example, if Engineering360 surfaces that your deployment frequency is dropping, the platform not only shows this but can help you trigger a workflow to address it – such as launching a retrospective or an automation to improve CI/CD speed.
+
+
+In Port’s view, **Software Engineering Intelligence is a strategic lever** for continuous improvement, not just a reporting layer for executives. We emphasize moving **“from insight to action” all within your internal developer portal**. This means the same tool that highlights a bottleneck (say, a spike in lead time for changes) also empowers the team to fix it (for instance, by scheduling an automated code review reminder or spinning up a self-service environment to test a hotfix).
By shortening the path from observation to remediation, organizations can respond to problems before they fester.
+
+
+Port is bold about going **beyond the numbers** in another sense: we believe that improving developer experience and productivity isn’t just about optimization and efficiency; it’s about building a culture of ownership and autonomy. Metrics should never be used to assign blame. Instead, they serve as a feedback mechanism – one that both engineers and leaders can trust to guide decisions. In fact, Port intentionally **connects metrics with developer sentiment** surveys to ensure any narrative the numbers tell is grounded in reality.
+If the data says one thing but engineers are saying another, that’s a flag to investigate further. This human-in-the-loop approach keeps the focus on meaningful improvement rather than chasing metrics for their own sake.
+
+
+Ultimately, Port’s philosophy can be summed up as “**metrics with purpose**”. Every dashboard, scorecard, or report in Engineering360 is designed to prompt the question: “What will we do about this?” Whether it’s automating a repetitive task, adjusting a team’s processes, or opening a conversation about developer well-being, the goal is to turn metrics into momentum. By looking beyond the numbers and treating SEI as an active ingredient in your engineering strategy, you create a virtuous cycle – data leads to action, action leads to better data, and engineering excellence continuously reinforces itself.
\ No newline at end of file
diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/the-state-of-engineering-intelligence.md b/docs/solutions/engineering360/the-state-of-engineering-intelligence/the-state-of-engineering-intelligence.md new file mode 100644 index 0000000000..def94da142 --- /dev/null +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/the-state-of-engineering-intelligence.md @@ -0,0 +1,14 @@
+# The state of engineering intelligence
+
+Engineering leaders today face an urgent need for visibility into how software teams work and what slows them down. Developers spend only a fraction of their time building new features—much of their week is lost to internal friction, tech debt, flaky processes, and scattered tools.
+
+These challenges gave rise to platform engineering and Software Engineering Intelligence (SEI) platforms as organizations seek data-driven ways to boost productivity and developer experience. Yet the engineering metrics/DevEx space is still evolving.
+
+Many companies track DevOps performance through frameworks like DORA, and some conduct developer satisfaction surveys – but too often these insights live in separate silos. This isolation means teams might know what is happening (e.g. deployment frequency dropped) without knowing why (e.g. developers are frustrated with a particular tool).
+
+The result is uncertainty in prioritizing improvements. Engineering leaders commonly voice a pattern: “We know there are productivity gaps, but we don’t know exactly where—or how—to prioritize them.” Modern Engineering Intelligence aims to close this gap. The role of SEI is to aggregate and analyze data across the software development lifecycle, from code repositories and CI pipelines to project trackers and incident management. By unifying these data streams, SEI platforms provide a “single pane of glass” for engineering health. Crucially, Port’s view is that insight alone isn’t enough – data must lead to action.
Metrics should not be mere reporting tools for executives, but levers for continuous improvement in developer culture and workflow. + +Port’s Engineering360, for example, combines quantitative metrics with qualitative developer feedback in one place, so teams can pinpoint friction and address it directly in Port. +Instead of guessing what will help developers, engineering organizations can strategically invest in improvements backed by real data on where engineers struggle most. + +Overall, the state of engineering intelligence is one of growing recognition: to move faster responsibly, you need both metrics and empathy. Companies are moving beyond vanity stats to focus on meaningful indicators (like DORA’s speed and stability measures) and complementing them with developer sentiment. The SEI domain is maturing from basic dashboards toward integrated, actionable intelligence – helping engineering leaders cultivate high-performing, happy teams in a data-informed way. \ No newline at end of file diff --git a/docs/solutions/engineering360/use-cases/_category_.json b/docs/solutions/engineering360/use-cases/_category_.json new file mode 100644 index 0000000000..f46b69faef --- /dev/null +++ b/docs/solutions/engineering360/use-cases/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Use Cases", + "position": 5 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/use-cases/use-case-1/_category_.json b/docs/solutions/engineering360/use-cases/use-case-1/_category_.json new file mode 100644 index 0000000000..d2e8b1c29b --- /dev/null +++ b/docs/solutions/engineering360/use-cases/use-case-1/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Use Case 1", + "position": 1 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/use-cases/use-case-1/use-case-1.md b/docs/solutions/engineering360/use-cases/use-case-1/use-case-1.md new file mode 100644 index 0000000000..6a7a29dea4 --- /dev/null +++ b/docs/solutions/engineering360/use-cases/use-case-1/use-case-1.md @@ -0,0 +1 @@ +use case 1 \ No newline at end of file diff --git a/docs/solutions/engineering360/use-cases/use-case-2/_category_.json b/docs/solutions/engineering360/use-cases/use-case-2/_category_.json new file mode 100644 index 0000000000..c4dd5aa564 --- /dev/null +++ b/docs/solutions/engineering360/use-cases/use-case-2/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Use Case 2", + "position": 2 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/use-cases/use-case-2/use-case-2.md b/docs/solutions/engineering360/use-cases/use-case-2/use-case-2.md new file mode 100644 index 0000000000..06e120ebe0 --- /dev/null +++ b/docs/solutions/engineering360/use-cases/use-case-2/use-case-2.md @@ -0,0 +1 @@ +use case 2 \ No newline at end of file diff --git a/docs/solutions/engineering360/use-cases/use-case-3/_category_.json b/docs/solutions/engineering360/use-cases/use-case-3/_category_.json new file mode 100644 index 0000000000..7b9aad5448 --- /dev/null +++ b/docs/solutions/engineering360/use-cases/use-case-3/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Use Case 3", + "position": 3 +} \ No newline at end of file diff --git a/docs/solutions/engineering360/use-cases/use-case-3/use-case-3.md b/docs/solutions/engineering360/use-cases/use-case-3/use-case-3.md new file mode 100644 index 0000000000..4ec883cce4 --- /dev/null +++ b/docs/solutions/engineering360/use-cases/use-case-3/use-case-3.md @@ -0,0 +1 @@ +use case 3 \ No newline at end of file diff --git a/docs/solutions/engineering360/use-cases/use-cases.md 
b/docs/solutions/engineering360/use-cases/use-cases.md new file mode 100644 index 0000000000..066197bc01 --- /dev/null +++ b/docs/solutions/engineering360/use-cases/use-cases.md @@ -0,0 +1 @@ +use cases \ No newline at end of file diff --git a/docusaurus.config.js b/docusaurus.config.js index 6749bef359..e766414130 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -18,9 +18,9 @@ const config = { staticDirectories: ["static"], future: { experimental_faster: true, // turns Docusaurus Faster on globally - v4: { - removeLegacyPostBuildHeadAttribute: true, // required - }, + // v4: { + // removeLegacyPostBuildHeadAttribute: true, // required + // }, }, presets: [ From 20d991f649d455038c3b5f32a870e0d5aa46c11f Mon Sep 17 00:00:00 2001 From: Hadar Date: Tue, 10 Jun 2025 18:38:00 +0300 Subject: [PATCH 3/4] uncomment flag --- docusaurus.config.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docusaurus.config.js b/docusaurus.config.js index e766414130..6749bef359 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -18,9 +18,9 @@ const config = { staticDirectories: ["static"], future: { experimental_faster: true, // turns Docusaurus Faster on globally - // v4: { - // removeLegacyPostBuildHeadAttribute: true, // required - // }, + v4: { + removeLegacyPostBuildHeadAttribute: true, // required + }, }, presets: [ From 7253e09424f52539723140552e986cd43a2662c7 Mon Sep 17 00:00:00 2001 From: barittahport Date: Sun, 15 Jun 2025 11:37:07 +0300 Subject: [PATCH 4/4] changed pages location --- .../common-pitfalls-in-metrics/_category_.json | 2 +- .../engineering-metrics-what-matters/_category_.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/_category_.json index 73e76f6a7c..d64269e952 100644 --- a/docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/_category_.json +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/common-pitfalls-in-metrics/_category_.json @@ -1,4 +1,4 @@ { "label": "Common pitfalls in engineering metrics", - "position": 1 + "position": 3 } \ No newline at end of file diff --git a/docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/_category_.json b/docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/_category_.json index ba20ca7480..e78368f2e2 100644 --- a/docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/_category_.json +++ b/docs/solutions/engineering360/the-state-of-engineering-intelligence/engineering-metrics-what-matters/_category_.json @@ -1,4 +1,4 @@ { "label": "Engineering metrics: what matters", - "position": 3 + "position": 1 } \ No newline at end of file