[WIP] : improve current timing script #114
Open
akshitasure12 wants to merge 24 commits into networkx:main from akshitasure12:improve-timing
+154 −102
Changes from all commits (24 commits)
eb7b543 updated pyproject.toml (akshitasure12)
4e010d7 minimal improvements (akshitasure12)
146f25f modularized the script (akshitasure12)
d0935a8 removed function calls (akshitasure12)
c85e4c7 added next func record_result (akshitasure12)
31b8360 switched to enumerate (akshitasure12)
1b3f2ee Added number of trials for minimal noise (akshitasure12)
c2ed666 increased the number of nodes to test (akshitasure12)
7c15124 setting ticks (akshitasure12)
66c234a Move back numpy and scipy to test dependencies (akshitasure12)
6b666bb working with shared memory (akshitasure12)
49e35d8 Revert "working with shared memory" (akshitasure12)
1a1e629 use timeit.repeat() instead of perf_counter() (akshitasure12)
234cc43 simplified bipartite logic (akshitasure12)
4ec2ab2 optimised the visualization (akshitasure12)
b843714 minor edits (akshitasure12)
924bc27 removed timing_all_functions (akshitasure12)
f45627d removed timing_comparision.md (akshitasure12)
aab4245 Revert "removed timing_comparision.md" (akshitasure12)
79cf08c Revert "removed timing_all_functions" (akshitasure12)
3edf1d8 update installation text (akshitasure12)
3159bdb final touches (akshitasure12)
d037437 improve legend (akshitasure12)
f2d9970 minor edit (akshitasure12)
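Two of the commits above ("use timeit.repeat() instead of perf_counter()" and "Added number of trials for minimal noise") change how each run is measured: instead of bracketing a single call with wall-clock timestamps, the script now repeats every call and keeps the fastest trial. A minimal sketch of that pattern, simplified from the new measure_time helper in the diff below; the function, graph size, and node pair here are illustrative choices, not taken from the PR:

import timeit

import networkx as nx
import nx_parallel as nxp

G = nx.tournament.random_tournament(200, seed=42)
H = nxp.ParallelGraph(G)  # wrapping G routes calls to the nx-parallel backend


def run_parallel():
    nx.tournament.is_reachable(H, 1, 199)


def run_networkx():
    nx.tournament.is_reachable(G, 1, 199)


# repeat=5, number=1 -> five independent single-call timings;
# min() is the least noisy estimate of each implementation's runtime.
parallel_time = min(timeit.repeat(run_parallel, repeat=5, number=1))
std_time = min(timeit.repeat(run_networkx, repeat=5, number=1))
print(f"speed-up: {std_time / parallel_time:.2g}x")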
File renamed without changes.
@@ -1,110 +1,157 @@
import time
"""
To generate heatmaps comparing the performance of nx-parallel and NetworkX implementations, make sure to run:
python3 -m pip install -e '.[heatmap]'
"""

import networkx as nx
import pandas as pd
import nx_parallel as nxp
from matplotlib import pyplot as plt, patches as mpatches
import seaborn as sns
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import timeit
import random
import types

import nx_parallel as nxp
seed = random.Random(42)
tournament_funcs = ["is_reachable", "tournament_is_strongly_connected"]
bipartite_funcs = ["node_redundancy"]

# Code to create README heatmaps for individual function currFun
heatmapDF = pd.DataFrame()
# for bipartite graphs
# n = [50, 100, 200, 400]
# m = [25, 50, 100, 200]
number_of_nodes_list = [200, 400, 800, 1600]
weighted = False
pList = [1, 0.8, 0.6, 0.4, 0.2]
currFun = nx.tournament.is_reachable
"""
for p in pList:
for num in range(len(number_of_nodes_list)):
# create original and parallel graphs
G = nx.fast_gnp_random_graph(
number_of_nodes_list[num], p, seed=42, directed=True
)


# for bipartite.node_redundancy
G = nx.bipartite.random_graph(n[num], m[num], p, seed=42, directed=True)
for i in G.nodes:
l = list(G.neighbors(i))
if len(l) == 0:
v = random.choice(list(G.nodes) - [i,])
G.add_edge(i, v)
G.add_edge(i, random.choice([node for node in G.nodes if node != i]))
elif len(l) == 1:
G.add_edge(i, random.choice([node for node in G.nodes if node != i and node not in list(G.neighbors(i))]))

# for weighted graphs
if weighted:
random.seed(42)
for u, v in G.edges():
G[u][v]["weight"] = random.random()

H = nxp.ParallelGraph(G)

# time both versions and update heatmapDF
t1 = time.time()
c1 = currFun(H)
if isinstance(c1, types.GeneratorType):
d = dict(c1)
t2 = time.time()
parallelTime = t2 - t1
t1 = time.time()
c2 = currFun(G)
if isinstance(c2, types.GeneratorType):
d = dict(c2)
t2 = time.time()
stdTime = t2 - t1

def time_individual_function(
targetFunc, number_of_nodes, edge_prob, speedup_df, heatmap_annot, *, weighted=False
):
def measure_time(G, *args):
repeat = 5

def wrapper():
result = targetFunc(G, *args)
if isinstance(result, types.GeneratorType):
_ = dict(result)

times = timeit.repeat(wrapper, repeat=repeat, number=1)
return min(times)

def record_result(stdTime, parallelTime, row, col):
timesFaster = stdTime / parallelTime
heatmapDF.at[number_of_nodes_list[num], p] = timesFaster
print("Finished " + str(currFun))
"""
speedup_df.at[row, col] = timesFaster
heatmap_annot.at[row, col] = f"{parallelTime:.2g}s | {timesFaster:.2g}x"

if targetFunc.__name__ not in tournament_funcs:
for p in edge_prob:
for ind, num in enumerate(number_of_nodes):
# for bipartite graphs
if targetFunc.__name__ in bipartite_funcs:
n = [200, 400, 800, 1600]
m = [100, 200, 400, 800]
print(f"Number of Nodes: {n[ind] + m[ind]}")
G = nx.bipartite.random_graph(
n[ind], m[ind], p, directed=True, seed=seed
)
for cur_node in G.nodes:
neighbors = set(G.neighbors(cur_node))
# have atleast 2 outgoing edges
while len(neighbors) < 2:
new_neighbor = seed.choice(
[
node
for node in G.nodes
if node != cur_node and node not in neighbors
]
)
G.add_edge(cur_node, new_neighbor)
neighbors.add(new_neighbor)
else:
print(f"Number of Nodes: {num}")
G = nx.fast_gnp_random_graph(num, p, directed=True, seed=seed)
print(f"Edge Probability: {p}")

# Code to create for row of heatmap specifically for tournaments
for num in number_of_nodes_list:
print(num)
G = nx.tournament.random_tournament(num, seed=42)
H = nxp.ParallelGraph(G)
t1 = time.time()
c = currFun(H, 1, num)
t2 = time.time()
parallelTime = t2 - t1
print(parallelTime)
t1 = time.time()
c = currFun(G, 1, num)
t2 = time.time()
stdTime = t2 - t1
print(stdTime)
timesFaster = stdTime / parallelTime
heatmapDF.at[num, 3] = timesFaster
print("Finished " + str(currFun))

# plotting the heatmap with numbers and a green color scheme
plt.figure(figsize=(20, 4))
hm = sns.heatmap(data=heatmapDF.T, annot=True, cmap="Greens", cbar=True)

# Remove the tick labels on both axes
hm.set_yticklabels(
[
3,
# for weighted graphs
if weighted:
seed.random()
for u, v in G.edges():
G[u][v]["weight"] = random.random()

H = nxp.ParallelGraph(G)
# time both versions and update speedup_df
parallelTime = measure_time(H)
print(parallelTime)
stdTime = measure_time(G)
print(stdTime)
record_result(stdTime, parallelTime, num, p)
print("Finished " + str(targetFunc))
else:
# for tournament graphs
for num in number_of_nodes:
print(f"Number of Nodes: {num}")
G = nx.tournament.random_tournament(num, seed=seed)
H = nxp.ParallelGraph(G)
source, target = seed.sample(range(num), 2)
parallelTime = measure_time(H, source, target)
print(parallelTime)
stdTime = measure_time(G, source, target)
print(stdTime)
record_result(stdTime, parallelTime, num, edge_prob[0])
print("Finished " + str(targetFunc))


def plot_timing_heatmap(targetFunc):
number_of_nodes = (
[200, 400, 800, 1600]
if targetFunc.__name__ not in bipartite_funcs
else [300, 600, 1200, 2400]
)
edge_prob = (
[1, 0.8, 0.6, 0.4, 0.2] if targetFunc.__name__ not in tournament_funcs else [1]
)

speedup_df = pd.DataFrame(index=number_of_nodes, columns=edge_prob, dtype=float)
heatmap_annot = pd.DataFrame(index=number_of_nodes, columns=edge_prob, dtype=object)

time_individual_function(
targetFunc, number_of_nodes, edge_prob, speedup_df, heatmap_annot
)

plt.figure(figsize=(20, 6))
ax = sns.heatmap(
data=speedup_df.T,
annot=heatmap_annot.T,
annot_kws={"size": 12, "weight": "bold"},
fmt="",
cmap="Greens",
cbar=True,
)

ax.set_xticks(np.arange(len(number_of_nodes)) + 0.5)
ax.set_xticklabels(number_of_nodes, rotation=45)
ax.set_yticks(np.arange(len(edge_prob)) + 0.5)
ax.set_yticklabels(edge_prob, rotation=20)

ax.set_xlabel("Number of Vertices", fontweight="bold", fontsize=12)
ax.set_ylabel("Edge Probability", fontweight="bold", fontsize=12)

n_jobs = nx.config.backends.parallel.n_jobs
ax.set_title(
f"Small Scale Demo: Time Speedups of {targetFunc.__name__} compared to NetworkX on {n_jobs} cores",
fontweight="bold",
fontsize=14,
loc="left",
)

legend_patches = [
mpatches.Patch(color="none", label="Left: Parallel runtime (s)"),
mpatches.Patch(color="none", label="Right: Speed-up"),
]
)

# Adding x-axis labels
hm.set_xticklabels(number_of_nodes_list)

# Rotating the x-axis labels for better readability (optional)
plt.xticks(rotation=45)
plt.yticks(rotation=20)
plt.title(
"Small Scale Demo: Times Speedups of " + currFun.__name__ + " compared to NetworkX"
)
plt.xlabel("Number of Vertices")
plt.ylabel("Edge Probability")
print(currFun.__name__)

# displaying the plotted heatmap
plt.tight_layout()
plt.savefig("timing/" + "heatmap_" + currFun.__name__ + "_timing.png")
ax.legend(
handles=legend_patches,
loc="lower right",
bbox_to_anchor=(1.0, 1.02),
title="Cell Values",
prop={"size": 12},
)

plt.tight_layout(rect=[0, 0, 1, 0.94])
plt.savefig("timing/" + "heatmap_" + targetFunc.__name__ + "_timing.png")


# plot_timing_heatmap(nx.algorithms.tournament.is_reachable)
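The commented-out call above is the intended entry point. A hedged usage sketch, assuming lines like these replace that comment at the bottom of the new script and that it is run from the repository root after python3 -m pip install -e '.[heatmap]' (the bipartite call is an assumption based on the bipartite_funcs list, not something the diff adds):

plot_timing_heatmap(nx.algorithms.tournament.is_reachable)  # tournament branch, shown commented out in the diff
plot_timing_heatmap(nx.algorithms.bipartite.node_redundancy)  # bipartite branch (assumed)

Each call writes timing/heatmap_<function name>_timing.png, and each heatmap cell is annotated as "<parallel runtime>s | <speed-up>x", matching the legend patches added in the plotting code.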