diff --git a/src/sempy_labs/__init__.py b/src/sempy_labs/__init__.py index 49e9a61c..92fefda5 100644 --- a/src/sempy_labs/__init__.py +++ b/src/sempy_labs/__init__.py @@ -439,9 +439,6 @@ "resolve_capacity_name", "run_model_bpa_bulk", "create_model_bpa_semantic_model", - "list_deployment_pipeline_stage_items", - "list_deployment_pipeline_stages", - "list_deployment_pipelines", "get_git_connection", "get_git_status", "commit_to_git", @@ -595,4 +592,7 @@ "get_user_delegation_key", "refresh_sql_endpoint_metadata", "list_semantic_model_datasources", + "list_deployment_pipeline_stage_items", + "list_deployment_pipeline_stages", + "list_deployment_pipelines", ] diff --git a/src/sempy_labs/_deployment_pipelines.py b/src/sempy_labs/_deployment_pipelines.py index f45043f1..ab979dad 100644 --- a/src/sempy_labs/_deployment_pipelines.py +++ b/src/sempy_labs/_deployment_pipelines.py @@ -9,6 +9,33 @@ from uuid import UUID +def resolve_deployment_pipeline_id(deployment_pipeline: str | UUID) -> UUID: + """ + Obtains the Id for a given deployment pipeline. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + + Returns + ------- + uuid.UUID + The deployment pipeline Id. + """ + + if _is_valid_uuid(deployment_pipeline): + return deployment_pipeline + else: + dfP = list_deployment_pipelines() + dfP_filt = dfP[dfP["Deployment Pipeline Name"] == deployment_pipeline] + if len(dfP_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{deployment_pipeline}' deployment pipeline is not valid." + ) + return dfP_filt["Deployment Pipeline Id"].iloc[0] + + def list_deployment_pipelines() -> pd.DataFrame: """ Shows a list of deployment pipelines the user can access. @@ -63,8 +90,6 @@ def list_deployment_pipeline_stages(deployment_pipeline: str | UUID) -> pd.DataF A pandas dataframe showing the specified deployment pipeline stages. 
""" - from sempy_labs._helper_functions import resolve_deployment_pipeline_id - columns = { "Deployment Pipeline Stage Id": "string", "Deployment Pipeline Stage Name": "string", @@ -126,8 +151,6 @@ def list_deployment_pipeline_stage_items( A pandas dataframe showing the supported items from the workspace assigned to the specified stage of the specified deployment pipeline. """ - from sempy_labs._helper_functions import resolve_deployment_pipeline_id - columns = { "Deployment Pipeline Stage Item Id": "string", "Deployment Pipeline Stage Item Name": "string", @@ -181,3 +204,214 @@ def resolve_deployment_pipeline_stage_id( df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) return df + + +def list_deployment_pipeline_role_assignments( + deployment_pipeline: str | UUID, +) -> pd.DataFrame: + """ + Shows the role assignments for the specified deployment pipeline. + + This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipeline Role Assignments `_. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing the role assignments for the specified deployment pipeline. 
+ """ + + columns = { + "Role": "string", + "Principal Id": "string", + "Principal Type": "string", + } + df = _create_dataframe(columns=columns) + + deployment_pipeline_id = resolve_deployment_pipeline_id( + deployment_pipeline=deployment_pipeline + ) + + responses = _base_api( + request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/roleAssignments", + uses_pagination=True, + client="fabric_sp", + ) + + dfs = [] + + for r in responses: + for v in r.get("value", []): + principal = v.get("principal", {}) + new_data = { + "Role": v.get("role"), + "Principal Id": principal.get("id"), + "Principal Type Name": principal.get("type"), + } + dfs.append(pd.DataFrame(new_data, index=[0])) + + if dfs: + df = pd.concat(dfs, ignore_index=True) + + return df + + +def delete_deployment_pipeline( + deployment_pipeline: str | UUID, +): + """ + Deletes the specified deployment pipeline. + + This is a wrapper function for the following API: `Deployment Pipelines - Delete Deployment Pipeline `_. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + """ + + deployment_pipeline_id = resolve_deployment_pipeline_id( + deployment_pipeline=deployment_pipeline + ) + + _base_api( + request=f"/v1/deploymentPipelines/{deployment_pipeline_id}", + method="delete", + client="fabric_sp", + ) + + print( + f"{icons.green_dot} The '{deployment_pipeline}' deployment pipeline has been deleted successfully." + ) + + +def list_deployment_pipeline_operations( + deployment_pipeline: str | UUID, +) -> pd.DataFrame: + """ + Shows the operations for the specified deployment pipeline. + + This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipeline Operations `_. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing the operations for the specified deployment pipeline. 
+ """ + + columns = { + "Operation Id": "string", + "Type": "string", + "Status": "string", + "Last Updated Time": "string", + "Execution Start Time": "datetime_coerce", + "Execution End Time": "datetime_coerce", + "Source Stage Id": "string", + "Target Stage Id": "string", + "Note": "string", + "New Items Count": "int", + "Different Items Count": "int", + "No Difference Items Count": "int", + "Performed By Id": "string", + "Performed By Type": "string", + } + df = _create_dataframe(columns=columns) + + deployment_pipeline_id = resolve_deployment_pipeline_id( + deployment_pipeline=deployment_pipeline + ) + + responses = _base_api( + request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/operations", + uses_pagination=True, + client="fabric_sp", + ) + + dfs = [] + for r in responses: + for v in r.get("value", []): + p = v.get("preDeploymentDiffInformation", {}) + new_data = { + "Operation Id": v.get("id"), + "Type": v.get("type"), + "Status": v.get("status"), + "Last Updated Time": v.get("lastUpdatedTime"), + "Execution Start Time": v.get("executionStartTime"), + "Execution End Time": v.get("executionEndTime"), + "Source Stage Id": v.get("sourceStageId"), + "Target Stage Id": v.get("targetStageId"), + "Note": v.get("note", {}).get("content"), + "New Items Count": p.get("newItemsCount"), + "Different Items Count": p.get("differentItemsCount"), + "No Difference Items Count": p.get("noDifferenceItemsCount"), + "Performed By Id": v.get("performedBy", {}).get("id"), + "Performed By Type": v.get("performedBy", {}).get("type"), + } + dfs.append(pd.DataFrame(new_data, index=[0])) + + if dfs: + df = pd.concat(dfs, ignore_index=True) + _update_dataframe_datatypes(dataframe=df, column_map=columns) + + return df + + +def unassign_workspace_from_stage( + deployment_pipeline: str | UUID, + stage: str | UUID, +): + """ + Unassigns the workspace from the specified stage of the specified deployment pipeline. 
+
+    This is a wrapper function for the following API: `Deployment Pipelines - Unassign Workspace From Stage `_.
+
+    Parameters
+    ----------
+    deployment_pipeline : str | uuid.UUID
+        The deployment pipeline name or ID.
+    stage : str | uuid.UUID
+        The deployment pipeline stage name or ID.
+    """
+
+    deployment_pipeline_id = resolve_deployment_pipeline_id(
+        deployment_pipeline=deployment_pipeline
+    )
+
+    def resolve_deployment_pipeline_stage_id(
+        deployment_pipeline_id: UUID, stage: str | UUID
+    ):
+
+        dfPS = list_deployment_pipeline_stages(
+            deployment_pipeline=deployment_pipeline_id
+        )
+
+        if _is_valid_uuid(stage):
+            dfPS_filt = dfPS[dfPS["Deployment Pipeline Stage Id"] == stage]
+        else:
+            dfPS_filt = dfPS[dfPS["Deployment Pipeline Stage Name"] == stage]
+        if dfPS_filt.empty:
+            raise ValueError(
+                f"{icons.red_dot} The '{stage}' stage does not exist within the '{deployment_pipeline}' deployment pipeline."
+            )
+        return dfPS_filt["Deployment Pipeline Stage Id"].iloc[0]
+
+    stage_id = resolve_deployment_pipeline_stage_id(deployment_pipeline_id, stage)
+
+    _base_api(
+        request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/stages/{stage_id}/unassignWorkspace",
+        method="post",
+        client="fabric_sp",
+    )
+
+    print(
+        f"{icons.green_dot} The workspace has been unassigned from the '{stage}' stage of the '{deployment_pipeline}' deployment pipeline successfully."
+    )
diff --git a/src/sempy_labs/_helper_functions.py b/src/sempy_labs/_helper_functions.py
index 6a88dd30..63929859 100644
--- a/src/sempy_labs/_helper_functions.py
+++ b/src/sempy_labs/_helper_functions.py
@@ -1311,36 +1311,6 @@ def pagination(client, response):
     return responses
 
 
-def resolve_deployment_pipeline_id(deployment_pipeline: str | UUID) -> UUID:
-    """
-    Obtains the Id for a given deployment pipeline.
-
-    Parameters
-    ----------
-    deployment_pipeline : str | uuid.UUID
-        The deployment pipeline name or ID.
-
-    Returns
-    -------
-    uuid.UUID
-        The deployment pipeline Id.
- """ - - from sempy_labs._deployment_pipelines import list_deployment_pipelines - - if _is_valid_uuid(deployment_pipeline): - return deployment_pipeline - else: - - dfP = list_deployment_pipelines() - dfP_filt = dfP[dfP["Deployment Pipeline Name"] == deployment_pipeline] - if len(dfP_filt) == 0: - raise ValueError( - f"{icons.red_dot} The '{deployment_pipeline}' deployment pipeline is not valid." - ) - return dfP_filt["Deployment Pipeline Id"].iloc[0] - - class FabricTokenCredential(TokenCredential): def get_token( diff --git a/src/sempy_labs/_sql_endpoints.py b/src/sempy_labs/_sql_endpoints.py index 39f22714..0107c3af 100644 --- a/src/sempy_labs/_sql_endpoints.py +++ b/src/sempy_labs/_sql_endpoints.py @@ -152,25 +152,33 @@ def refresh_sql_endpoint_metadata( df = pd.json_normalize(result) # Extract error code and message, set to None if no error - df['Error Code'] = df.get('error.errorCode', None) - df['Error Message'] = df.get('error.message', None) + df["Error Code"] = df.get("error.errorCode", None) + df["Error Message"] = df.get("error.message", None) # Friendly column renaming - df.rename(columns={ - 'tableName': 'Table Name', - 'startDateTime': 'Start Time', - 'endDateTime': 'End Time', - 'status': 'Status', - 'lastSuccessfulSyncDateTime': 'Last Successful Sync Time' - }, inplace=True) + df.rename( + columns={ + "tableName": "Table Name", + "startDateTime": "Start Time", + "endDateTime": "End Time", + "status": "Status", + "lastSuccessfulSyncDateTime": "Last Successful Sync Time", + }, + inplace=True, + ) # Drop the original 'error' column if present - df.drop(columns=[col for col in ['error'] if col in df.columns], inplace=True) + df.drop(columns=[col for col in ["error"] if col in df.columns], inplace=True) # Optional: Reorder columns column_order = [ - 'Table Name', 'Status', 'Start Time', 'End Time', - 'Last Successful Sync Time', 'Error Code', 'Error Message' + "Table Name", + "Status", + "Start Time", + "End Time", + "Last Successful Sync Time", + 
"Error Code", + "Error Message", ] df = df[column_order] diff --git a/src/sempy_labs/admin/__init__.py b/src/sempy_labs/admin/__init__.py index ca4adbf0..3ae7fd41 100644 --- a/src/sempy_labs/admin/__init__.py +++ b/src/sempy_labs/admin/__init__.py @@ -6,7 +6,7 @@ add_user_to_workspace, delete_user_from_workspace, restore_deleted_workspace, - list_orphaned_workspaces + list_orphaned_workspaces, ) from sempy_labs.admin._artifacts import ( list_unused_artifacts, diff --git a/src/sempy_labs/admin/_workspaces.py b/src/sempy_labs/admin/_workspaces.py index 866e7435..a78df7bf 100644 --- a/src/sempy_labs/admin/_workspaces.py +++ b/src/sempy_labs/admin/_workspaces.py @@ -3,8 +3,7 @@ _build_url, _encode_user, _update_dataframe_datatypes, - _create_dataframe - + _create_dataframe, ) from uuid import UUID @@ -152,11 +151,12 @@ def restore_deleted_workspace(workspace_id: UUID, name: str, email_address: str) f"{icons.green_dot} The '{workspace_id}' workspace has been restored as '{name}'." ) + def list_orphaned_workspaces(top: int = 100) -> pd.DataFrame: """ Shows a list of orphaned workspaces (those with no users or no admins). - This is a wrapper function for the following API: + This is a wrapper function for the following API: `Admin - Groups ListGroupsAsAdmin `_. Service Principal Authentication is supported (see `here `_ for examples). @@ -171,12 +171,12 @@ def list_orphaned_workspaces(top: int = 100) -> pd.DataFrame: pandas.DataFrame A pandas dataframe showing a list of orphaned workspaces. """ - + # column structure with proper data types columns = { "Workspace Name": "string", "Workspace Id": "string", - "Type": "string", + "Type": "string", "State": "string", "Is Read Only": "bool", "Is On Dedicated Capacity": "bool", @@ -184,9 +184,9 @@ def list_orphaned_workspaces(top: int = 100) -> pd.DataFrame: "Has Workspace Level Settings": "bool", "Users": "list", } - + df = _create_dataframe(columns=columns) - + url = ( "/v1.0/myorg/admin/groups?" 
"$expand=users&" @@ -198,29 +198,33 @@ def list_orphaned_workspaces(top: int = 100) -> pd.DataFrame: response = _base_api(request=url, client="fabric_sp") values = response.json().get("value", []) df_raw = pd.json_normalize(values) - + # friendly names and reorder if not df_raw.empty: - df_raw = df_raw.rename(columns={ - "name": "Workspace Name", - "id": "Workspace Id", - "type": "Type", - "state": "State", - "isReadOnly": "Is Read Only", - "isOnDedicatedCapacity": "Is On Dedicated Capacity", - "capacityMigrationStatus": "Capacity Migration Status", - "hasWorkspaceLevelSettings ": "Has Workspace Level Settings", # Note the space in original - "users": "Users" - }) - + df_raw = df_raw.rename( + columns={ + "name": "Workspace Name", + "id": "Workspace Id", + "type": "Type", + "state": "State", + "isReadOnly": "Is Read Only", + "isOnDedicatedCapacity": "Is On Dedicated Capacity", + "capacityMigrationStatus": "Capacity Migration Status", + "hasWorkspaceLevelSettings ": "Has Workspace Level Settings", # Note the space in original + "users": "Users", + } + ) + df = df_raw[list(columns.keys())].copy() - + # Convert empty lists to a more readable format for Users column - if 'Users' in df.columns: - df['Users'] = df['Users'].apply(lambda x: x if (x is not None and len(x) > 0) else []) + if "Users" in df.columns: + df["Users"] = df["Users"].apply( + lambda x: x if (x is not None and len(x) > 0) else [] + ) else: df = _create_dataframe(columns=columns) - + # proper data types _update_dataframe_datatypes(dataframe=df, column_map=columns) diff --git a/src/sempy_labs/deployment_pipeline/__init__.py b/src/sempy_labs/deployment_pipeline/__init__.py new file mode 100644 index 00000000..0e166165 --- /dev/null +++ b/src/sempy_labs/deployment_pipeline/__init__.py @@ -0,0 +1,19 @@ +from ._dp import ( + list, + list_stages, + list_stage_items, + list_role_assignments, + delete, + list_operations, + unassign_workspace_from_stage, +) + +__all__ = [ + "list", + "list_stages", + 
"list_stage_items", + "list_role_assignments", + "delete", + "list_operations", + "unassign_workspace_from_stage", +] diff --git a/src/sempy_labs/deployment_pipeline/_dp.py b/src/sempy_labs/deployment_pipeline/_dp.py new file mode 100644 index 00000000..88506ace --- /dev/null +++ b/src/sempy_labs/deployment_pipeline/_dp.py @@ -0,0 +1,402 @@ +import pandas as pd +from sempy_labs._helper_functions import ( + _is_valid_uuid, + _base_api, + _update_dataframe_datatypes, + _create_dataframe, +) +from sempy._utils._log import log +import sempy_labs._icons as icons +from uuid import UUID + + +def resolve_deployment_pipeline_id(deployment_pipeline: str | UUID) -> UUID: + """ + Obtains the Id for a given deployment pipeline. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + + Returns + ------- + uuid.UUID + The deployment pipeline Id. + """ + + if _is_valid_uuid(deployment_pipeline): + return deployment_pipeline + else: + dfP = list() + dfP_filt = dfP[dfP["Deployment Pipeline Name"] == deployment_pipeline] + if len(dfP_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{deployment_pipeline}' deployment pipeline is not valid." + ) + return dfP_filt["Deployment Pipeline Id"].iloc[0] + + +def resolve_stage_id(deployment_pipeline_id: UUID, stage: str | UUID): + + dfPS = list_stages(deployment_pipeline=deployment_pipeline_id) + + if _is_valid_uuid(stage): + dfPS_filt = dfPS[dfPS["Deployment Pipeline Stage Id"] == stage] + else: + dfPS_filt = dfPS[dfPS["Deployment Pipeline Stage Name"] == stage] + if dfPS.empty: + raise ValueError( + f"{icons.red_dot} The '{stage}' stage does not exist within the '{deployment_pipeline_id}' deployment pipeline." + ) + return dfPS_filt["Deployment Pipeline Stage Id"].iloc[0] + + +@log +def list() -> pd.DataFrame: + """ + Shows a list of deployment pipelines the user can access. + + This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipelines `_. 
+ + Returns + ------- + pandas.DataFrame + A pandas dataframe showing a list of deployment pipelines the user can access. + """ + + columns = { + "Deployment Pipeline Id": "string", + "Deployment Pipeline Name": "string", + "Description": "string", + } + df = _create_dataframe(columns=columns) + + responses = _base_api( + request="/v1/deploymentPipelines", + status_codes=200, + uses_pagination=True, + ) + + for r in responses: + for v in r.get("value", []): + new_data = { + "Deployment Pipeline Id": v.get("id"), + "Deployment Pipeline Name": v.get("displayName"), + "Description": v.get("description"), + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + return df + + +@log +def list_stages(deployment_pipeline: str | UUID) -> pd.DataFrame: + """ + Shows the specified deployment pipeline stages. + + This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipeline Stages `_. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing the specified deployment pipeline stages. 
+ """ + + columns = { + "Deployment Pipeline Stage Id": "string", + "Deployment Pipeline Stage Name": "string", + "Order": "int", + "Description": "string", + "Workspace Id": "string", + "Workspace Name": "string", + "Public": "bool", + } + df = _create_dataframe(columns=columns) + + deployment_pipeline_id = resolve_deployment_pipeline_id( + deployment_pipeline=deployment_pipeline + ) + + responses = _base_api( + request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/stages", + status_codes=200, + uses_pagination=True, + ) + + for r in responses: + for v in r.get("value", []): + new_data = { + "Deployment Pipeline Stage Id": v.get("id"), + "Deployment Pipeline Stage Name": v.get("displayName"), + "Description": v.get("description"), + "Order": v.get("order"), + "Workspace Id": v.get("workspaceId"), + "Workspace Name": v.get("workspaceName"), + "Public": v.get("isPublic"), + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + _update_dataframe_datatypes(dataframe=df, column_map=columns) + + return df + + +@log +def list_stage_items( + deployment_pipeline: str | UUID, + stage: str | UUID, +) -> pd.DataFrame: + """ + Shows the supported items from the workspace assigned to the specified stage of the specified deployment pipeline. + + This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipeline Stage Items `_. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + stage : str | uuid.UUID + The deployment pipeline stage name or ID. + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing the supported items from the workspace assigned to the specified stage of the specified deployment pipeline. 
+ """ + + columns = { + "Deployment Pipeline Stage Item Id": "string", + "Deployment Pipeline Stage Item Name": "string", + "Item Type": "string", + "Source Item Id": "string", + "Target Item Id": "string", + "Last Deployment Time": "string", + } + df = _create_dataframe(columns=columns) + + deployment_pipeline_id = resolve_deployment_pipeline_id( + deployment_pipeline=deployment_pipeline + ) + + stage_id = resolve_stage_id(deployment_pipeline_id, stage) + + responses = _base_api( + request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/stages/{stage_id}/items", + status_codes=200, + uses_pagination=True, + ) + + for r in responses: + for v in r.get("value", []): + new_data = { + "Deployment Pipeline Stage Item Id": v.get("itemId"), + "Deployment Pipeline Stage Item Name": v.get("itemDisplayName"), + "Item Type": v.get("itemType"), + "Source Item Id": v.get("sourceItemId"), + "Target Item Id": v.get("targetItemId"), + "Last Deployment Time": v.get("lastDeploymentTime"), + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + return df + + +@log +def list_role_assignments( + deployment_pipeline: str | UUID, +) -> pd.DataFrame: + """ + Shows the role assignments for the specified deployment pipeline. + + This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipeline Role Assignments `_. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing the role assignments for the specified deployment pipeline. 
+ """ + + columns = { + "Role": "string", + "Principal Id": "string", + "Principal Type": "string", + } + df = _create_dataframe(columns=columns) + + deployment_pipeline_id = resolve_deployment_pipeline_id( + deployment_pipeline=deployment_pipeline + ) + + responses = _base_api( + request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/roleAssignments", + uses_pagination=True, + client="fabric_sp", + ) + + dfs = [] + + for r in responses: + for v in r.get("value", []): + principal = v.get("principal", {}) + new_data = { + "Role": v.get("role"), + "Principal Id": principal.get("id"), + "Principal Type Name": principal.get("type"), + } + dfs.append(pd.DataFrame(new_data, index=[0])) + + if dfs: + df = pd.concat(dfs, ignore_index=True) + + return df + + +@log +def delete( + deployment_pipeline: str | UUID, +): + """ + Deletes the specified deployment pipeline. + + This is a wrapper function for the following API: `Deployment Pipelines - Delete Deployment Pipeline `_. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + """ + + deployment_pipeline_id = resolve_deployment_pipeline_id( + deployment_pipeline=deployment_pipeline + ) + + _base_api( + request=f"/v1/deploymentPipelines/{deployment_pipeline_id}", + method="delete", + client="fabric_sp", + ) + + print( + f"{icons.green_dot} The '{deployment_pipeline}' deployment pipeline has been deleted successfully." + ) + + +def list_operations( + deployment_pipeline: str | UUID, +) -> pd.DataFrame: + """ + Shows the operations for the specified deployment pipeline. + + This is a wrapper function for the following API: `Deployment Pipelines - List Deployment Pipeline Operations `_. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing the operations for the specified deployment pipeline. 
+ """ + + columns = { + "Operation Id": "string", + "Type": "string", + "Status": "string", + "Last Updated Time": "string", + "Execution Start Time": "datetime_coerce", + "Execution End Time": "datetime_coerce", + "Source Stage Id": "string", + "Target Stage Id": "string", + "Note": "string", + "New Items Count": "int", + "Different Items Count": "int", + "No Difference Items Count": "int", + "Performed By Id": "string", + "Performed By Type": "string", + } + df = _create_dataframe(columns=columns) + + deployment_pipeline_id = resolve_deployment_pipeline_id( + deployment_pipeline=deployment_pipeline + ) + + responses = _base_api( + request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/operations", + uses_pagination=True, + client="fabric_sp", + ) + + dfs = [] + for r in responses: + for v in r.get("value", []): + p = v.get("preDeploymentDiffInformation", {}) + new_data = { + "Operation Id": v.get("id"), + "Type": v.get("type"), + "Status": v.get("status"), + "Last Updated Time": v.get("lastUpdatedTime"), + "Execution Start Time": v.get("executionStartTime"), + "Execution End Time": v.get("executionEndTime"), + "Source Stage Id": v.get("sourceStageId"), + "Target Stage Id": v.get("targetStageId"), + "Note": v.get("note", {}).get("content"), + "New Items Count": p.get("newItemsCount"), + "Different Items Count": p.get("differentItemsCount"), + "No Difference Items Count": p.get("noDifferenceItemsCount"), + "Performed By Id": v.get("performedBy", {}).get("id"), + "Performed By Type": v.get("performedBy", {}).get("type"), + } + dfs.append(pd.DataFrame(new_data, index=[0])) + + if dfs: + df = pd.concat(dfs, ignore_index=True) + _update_dataframe_datatypes(dataframe=df, column_map=columns) + + return df + + +def unassign_workspace_from_stage( + deployment_pipeline: str | UUID, + stage: str | UUID, +): + """ + Unassigns the workspace from the specified stage of the specified deployment pipeline. 
+ + This is a wrapper function for the following API: `Deployment Pipelines - Unassign Workspace From Stage `_. + + Parameters + ---------- + deployment_pipeline : str | uuid.UUID + The deployment pipeline name or ID. + stage : str | uuid.UUID + The deployment pipeline stage name or ID. + """ + + deployment_pipeline_id = resolve_deployment_pipeline_id( + deployment_pipeline=deployment_pipeline + ) + + stage_id = resolve_stage_id(deployment_pipeline_id, stage) + + _base_api( + request=f"/v1/deploymentPipelines/{deployment_pipeline_id}/stages/{stage_id}/unassignWorkspace", + method="post", + client="fabric_sp", + ) + + print( + f"{icons.green_dot} The workspace has been unassigned from the '{stage}' stage of the '{deployment_pipeline}' deployment pipeline successfully." + )