From e318c55bb41f0139a4dae2bcecd8e8e0ac0d3329 Mon Sep 17 00:00:00 2001 From: "codeflash-ai[bot]" <148906541+codeflash-ai[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 00:59:06 +0000 Subject: [PATCH] =?UTF-8?q?=E2=9A=A1=EF=B8=8F=20Speed=20up=20function=20`c?= =?UTF-8?q?orrelation`=20by=2026,306%=20Here=20is=20an=20optimized=20versi?= =?UTF-8?q?on=20of=20your=20program.=20The=20main=20bottleneck=20is=20call?= =?UTF-8?q?ing=20`df.iloc[k][col]`=20in=20the=20innermost=20loop,=20and=20?= =?UTF-8?q?repeated=20na=20checking.=20Instead,=20I=20create=20a=20single?= =?UTF-8?q?=20NumPy=20mask=20per=20column=20pair=20so=20that=20we=20only?= =?UTF-8?q?=20look=20at=20rows=20with=20complete=20data=20for=20both=20col?= =?UTF-8?q?umns,=20then=20use=20fast=20NumPy=20ops=20for=20statistics.=20F?= =?UTF-8?q?inally,=20I=20avoid=20repeated=20conversion=20and=20slicing.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The implementation below will be vastly faster on non-trivial DataFrames. ### Key optimizations. - Avoids slow explicit loops over rows with efficient NumPy masking and computation. - Converts columns to NumPy arrays once outside the main loops. - Computes means, stds, and covariance using vectorized NumPy functions. - Reuses per-column mask arrays for validity checking. - Reduces pure Python statement overhead and memory churn from frequent list appends. **This version should be orders of magnitude faster for medium/large DataFrames, preserving all semantics and the function signature.** --- src/numpy_pandas/dataframe_operations.py | 41 ++++++++++++++---------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/src/numpy_pandas/dataframe_operations.py b/src/numpy_pandas/dataframe_operations.py index cb4cda2..54538a4 100644 --- a/src/numpy_pandas/dataframe_operations.py +++ b/src/numpy_pandas/dataframe_operations.py @@ -66,14 +66,17 @@ def pivot_table( def agg_func(values): return sum(values) / len(values) + elif aggfunc == "sum": def agg_func(values): return sum(values) + elif aggfunc == "count": def agg_func(values): return len(values) + else: raise ValueError(f"Unsupported aggregation function: {aggfunc}") grouped_data = {} @@ -204,38 +207,42 @@ def percentile(p): def correlation(df: pd.DataFrame) -> dict[Tuple[str, str], float]: + # Identify numeric columns numeric_columns = [ col for col in df.columns if np.issubdtype(df[col].dtype, np.number) ] n_cols = len(numeric_columns) result = {} + # Convert numeric columns to numpy arrays once + arrays = {col: df[col].to_numpy() for col in numeric_columns} + # Precompute NaN masks per numeric column + notna_masks = {col: ~np.isnan(arrays[col]) for col in numeric_columns} + for i in range(n_cols): col_i = numeric_columns[i] + arr_i = arrays[col_i] + mask_i = notna_masks[col_i] for j in range(n_cols): col_j = numeric_columns[j] - values_i = [] - values_j = [] - for k in range(len(df)): - if not pd.isna(df.iloc[k][col_i]) and not pd.isna(df.iloc[k][col_j]): - values_i.append(df.iloc[k][col_i]) - values_j.append(df.iloc[k][col_j]) - n = len(values_i) + arr_j = arrays[col_j] + mask_j = notna_masks[col_j] + # Use a combined valid data mask + valid_mask = mask_i & mask_j + n = valid_mask.sum() if n == 0: result[(col_i, col_j)] = np.nan continue - mean_i = sum(values_i) / n - mean_j = sum(values_j) / n - var_i = sum((x - mean_i) ** 2 for x in values_i) / n - var_j = sum((x - mean_j) ** 2 for x in values_j) / n - std_i = var_i**0.5 - std_j = var_j**0.5 + values_i = arr_i[valid_mask] + values_j = arr_j[valid_mask] + # Use NumPy for statistics + mean_i = np.mean(values_i) + mean_j = np.mean(values_j) + std_i = np.std(values_i) + std_j = np.std(values_j) if std_i == 0 or std_j == 0: result[(col_i, col_j)] = np.nan continue - cov = ( - sum((values_i[k] - mean_i) * (values_j[k] - mean_j) for k in range(n)) - / n - ) + cov = np.mean((values_i - mean_i) * (values_j - mean_j)) corr = cov / (std_i * std_j) result[(col_i, col_j)] = corr return result