@@ -1792,13 +1792,104 @@ int cloudsync_commit_alter (cloudsync_context *data, const char *table_name) {
17921792 return rc ;
17931793}
17941794
1795+ // MARK: - Filter Rewrite -
1796+
1797+ // Replace bare column names in a filter expression with prefix-qualified names.
1798+ // E.g., filter="user_id = 42", prefix="NEW", columns=["user_id","id"] → "NEW.\"user_id\" = 42"
1799+ // Matching is done on whole identifier tokens with exact length comparison, so column order does not matter.
1800+ // Skips content inside single-quoted string literals.
1801+ // Returns a newly allocated string (caller must free with cloudsync_memory_free), or NULL on error.
1802+ // Helper: check if an identifier token matches a column name.
// Return true when the token [token, token+token_len) is an exact match
// for one of the ncols column names; false otherwise.
static bool filter_is_column (const char *token, size_t token_len, char **columns, int ncols) {
    for (int idx = 0; idx < ncols; idx++) {
        const char *col = columns[idx];
        if (strlen(col) != token_len) continue;
        if (memcmp(col, token, token_len) == 0) return true;
    }
    return false;
}
1810+
1811+ // Helper: check if character is part of a SQL identifier.
// Return true when c can appear inside a SQL identifier:
// ASCII letters, digits, or underscore.
static bool filter_is_ident_char (char c) {
    if (c == '_') return true;
    if (c >= '0' && c <= '9') return true;
    return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
}
1816+
// Replace bare column references in a filter expression with
// prefix-qualified, double-quoted names.
// E.g. filter="user_id = 42", prefix="NEW", columns=["user_id","id"]
//      -> "NEW.\"user_id\" = 42"
// Behavior:
//   - single-quoted string literals (incl. '' escapes) are copied verbatim
//   - double-quoted identifiers (incl. "" escapes) are copied verbatim:
//     rewriting inside them would corrupt the expression
//   - identifiers already dot-qualified (e.g. t.col) are left untouched
//   - column matching is per whole token and case-sensitive
// Returns a newly allocated string (caller must free with
// cloudsync_memory_free), or NULL on invalid arguments / out of memory.
char *cloudsync_filter_add_row_prefix (const char *filter, const char *prefix, char **columns, int ncols) {
    if (!filter || !prefix || !columns || ncols <= 0) return NULL;

    size_t filter_len = strlen(filter);
    size_t prefix_len = strlen(prefix);

    // Each qualified column grows the output by (prefix_len + 3) bytes:
    // the prefix, a '.', and two '"'. Identifiers need at least one
    // separator character between them, so at most (filter_len/2 + 1)
    // tokens can match. Quoted spans are copied verbatim (no growth).
    size_t max_growth = (filter_len / 2 + 1) * (prefix_len + 3);
    size_t cap = filter_len + max_growth + 64;
    char *result = (char *)cloudsync_memory_alloc(cap);
    if (!result) return NULL;
    size_t out = 0;

    // Previous significant (non-whitespace) character, used to detect
    // identifiers that are already dot-qualified and must not be prefixed.
    char prev_sig = 0;

    // Single pass: tokenize into identifiers, quoted spans, and the rest.
    size_t i = 0;
    while (i < filter_len) {
        char c = filter[i];

        // Copy single-quoted string literals verbatim (handle '' escape).
        if (c == '\'') {
            result[out++] = filter[i++];
            while (i < filter_len) {
                if (filter[i] == '\'') {
                    result[out++] = filter[i++];
                    if (i < filter_len && filter[i] == '\'') {
                        result[out++] = filter[i++];    // '' escaped quote
                        continue;
                    }
                    break;                              // closing quote
                }
                result[out++] = filter[i++];
            }
            prev_sig = '\'';
            continue;
        }

        // Copy double-quoted identifiers verbatim (handle "" escape).
        // Rewriting inside an explicitly quoted identifier would produce
        // broken SQL like "NEW."user_id"".
        if (c == '"') {
            result[out++] = filter[i++];
            while (i < filter_len) {
                if (filter[i] == '"') {
                    result[out++] = filter[i++];
                    if (i < filter_len && filter[i] == '"') {
                        result[out++] = filter[i++];    // "" escaped quote
                        continue;
                    }
                    break;                              // closing quote
                }
                result[out++] = filter[i++];
            }
            prev_sig = '"';
            continue;
        }

        // Identifier token: qualify it only when it is a bare column name.
        if (filter_is_ident_char(c)) {
            size_t start = i;
            while (i < filter_len && filter_is_ident_char(filter[i])) ++i;
            size_t token_len = i - start;

            if (prev_sig != '.' && filter_is_column(&filter[start], token_len, columns, ncols)) {
                // Emit PREFIX."column_name"
                memcpy(&result[out], prefix, prefix_len); out += prefix_len;
                result[out++] = '.';
                result[out++] = '"';
                memcpy(&result[out], &filter[start], token_len); out += token_len;
                result[out++] = '"';
            } else {
                // Not a bare column — copy as-is
                memcpy(&result[out], &filter[start], token_len); out += token_len;
            }
            prev_sig = filter[i - 1];
            continue;
        }

        // Any other character — copy as-is; whitespace does not update
        // prev_sig so "t . col" is still seen as qualified.
        result[out++] = filter[i++];
        if (c != ' ' && c != '\t' && c != '\r' && c != '\n') prev_sig = c;
    }

    result[out] = '\0';
    return result;
}
1880+
17951881int cloudsync_refill_metatable (cloudsync_context * data , const char * table_name ) {
17961882 cloudsync_table_context * table = table_lookup (data , table_name );
17971883 if (!table ) return DBRES_ERROR ;
1798-
1884+
17991885 dbvm_t * vm = NULL ;
18001886 int64_t db_version = cloudsync_dbversion_next (data , CLOUDSYNC_VALUE_NOTSET );
18011887
1888+ // Read row-level filter from settings (if any)
1889+ char filter_buf [2048 ];
1890+ int frc = dbutils_table_settings_get_value (data , table_name , "*" , "filter" , filter_buf , sizeof (filter_buf ));
1891+ const char * filter = (frc == DBRES_OK && filter_buf [0 ]) ? filter_buf : NULL ;
1892+
18021893 const char * schema = table -> schema ? table -> schema : "" ;
18031894 char * sql = sql_build_pk_collist_query (schema , table_name );
18041895 char * pkclause_identifiers = NULL ;
@@ -1808,18 +1899,22 @@ int cloudsync_refill_metatable (cloudsync_context *data, const char *table_name)
18081899 char * pkvalues_identifiers = (pkclause_identifiers ) ? pkclause_identifiers : "rowid" ;
18091900
18101901 // Use database-specific query builder to handle type differences in composite PKs
1811- sql = sql_build_insert_missing_pks_query (schema , table_name , pkvalues_identifiers , table -> base_ref , table -> meta_ref );
1902+ sql = sql_build_insert_missing_pks_query (schema , table_name , pkvalues_identifiers , table -> base_ref , table -> meta_ref , filter );
18121903 if (!sql ) {rc = DBRES_NOMEM ; goto finalize ;}
18131904 rc = database_exec (data , sql );
18141905 cloudsync_memory_free (sql );
18151906 if (rc != DBRES_OK ) goto finalize ;
1816-
1907+
18171908 // fill missing colums
18181909 // for each non-pk column:
18191910 // The new query does 1 encode per source row and one indexed NOT-EXISTS probe.
1820- // The old plan does many decodes per candidate and can’t use an index to rule out matches quickly—so it burns CPU and I/O.
1821-
1822- sql = cloudsync_memory_mprintf (SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL , pkvalues_identifiers , table -> base_ref , table -> meta_ref );
1911+ // The old plan does many decodes per candidate and can't use an index to rule out matches quickly—so it burns CPU and I/O.
1912+
1913+ if (filter ) {
1914+ sql = cloudsync_memory_mprintf (SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL_FILTERED , pkvalues_identifiers , table -> base_ref , filter , table -> meta_ref );
1915+ } else {
1916+ sql = cloudsync_memory_mprintf (SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL , pkvalues_identifiers , table -> base_ref , table -> meta_ref );
1917+ }
18231918 rc = databasevm_prepare (data , sql , (void * * )& vm , DBFLAG_PERSISTENT );
18241919 cloudsync_memory_free (sql );
18251920 if (rc != DBRES_OK ) goto finalize ;
@@ -2723,8 +2818,13 @@ int cloudsync_init_table (cloudsync_context *data, const char *table_name, const
27232818 // sync algo with table (unused in this version)
27242819 // cloudsync_sync_table_key(data, table_name, "*", CLOUDSYNC_KEY_ALGO, crdt_algo_name(algo_new));
27252820
2821+ // read row-level filter from settings (if any)
2822+ char init_filter_buf [2048 ];
2823+ int init_frc = dbutils_table_settings_get_value (data , table_name , "*" , "filter" , init_filter_buf , sizeof (init_filter_buf ));
2824+ const char * init_filter = (init_frc == DBRES_OK && init_filter_buf [0 ]) ? init_filter_buf : NULL ;
2825+
27262826 // check triggers
2727- rc = database_create_triggers (data , table_name , algo_new );
2827+ rc = database_create_triggers (data , table_name , algo_new , init_filter );
27282828 if (rc != DBRES_OK ) return cloudsync_set_error (data , "An error occurred while creating triggers" , DBRES_MISUSE );
27292829
27302830 // check meta-table
0 commit comments