@@ -1794,13 +1794,104 @@ int cloudsync_commit_alter (cloudsync_context *data, const char *table_name) {
17941794 return rc ;
17951795}
17961796
1797+ // MARK: - Filter Rewrite -
1798+
1799+ // Replace bare column names in a filter expression with prefix-qualified names.
1800+ // E.g., filter="user_id = 42", prefix="NEW", columns=["user_id","id"] → "NEW.\"user_id\" = 42"
1801+ // Matching is done on whole identifier tokens, so column ordering does not matter.
1802+ // Skips content inside single-quoted string literals.
1803+ // Returns a newly allocated string (caller must free with cloudsync_memory_free), or NULL on error.
// Helper: return true when the token [token, token+token_len) exactly
// matches one of the ncols column names (case-sensitive comparison).
static bool filter_is_column (const char *token, size_t token_len, char **columns, int ncols) {
    int idx = 0;
    while (idx < ncols) {
        const char *name = columns[idx];
        // Length check first so memcmp never reads past either buffer.
        if (strlen(name) == token_len && memcmp(name, token, token_len) == 0) {
            return true;
        }
        ++idx;
    }
    return false;
}
1812+
// Helper: true when c may appear in an unquoted SQL identifier:
// ASCII letters, ASCII digits, or underscore. Deliberately hand-rolled
// (not isalnum) so the result is locale- and signedness-independent.
static bool filter_is_ident_char (char c) {
    if (c == '_') return true;
    if (c >= '0' && c <= '9') return true;
    return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
}
1818+
// Replace bare column references in a SQL filter expression with
// prefix-qualified, double-quoted names.
// E.g., filter="user_id = 42", prefix="NEW", columns=["user_id","id"]
//   → "NEW.\"user_id\" = 42"
//
// The scanner works on whole identifier tokens, so column order does not
// matter and partial matches cannot occur. Quoted regions are copied
// verbatim: single-quoted string literals ('' escape) and double-quoted
// identifiers ("" escape). A matching token is left untouched when it is
// already qualified (immediately preceded by '.', e.g. OLD.user_id) or is
// used as a function name (immediately followed by '(', e.g. length(x)).
//
// Returns a newly allocated string (caller must free with
// cloudsync_memory_free), or NULL on invalid arguments / allocation failure.
char *cloudsync_filter_add_row_prefix (const char *filter, const char *prefix, char **columns, int ncols) {
    if (!filter || !prefix || !columns || ncols <= 0) return NULL;

    size_t filter_len = strlen(filter);
    size_t prefix_len = strlen(prefix);

    // Each rewritten identifier grows the output by (prefix_len + 3) bytes:
    // the prefix, a dot, and two double quotes. Identifiers must be separated
    // by at least one non-identifier character, so at most (filter_len/2 + 1)
    // rewrites can occur. Allocate a safe upper bound up front.
    size_t max_growth = (filter_len / 2 + 1) * (prefix_len + 3);
    size_t cap = filter_len + max_growth + 64;
    char *result = (char *)cloudsync_memory_alloc(cap);
    if (!result) return NULL;
    size_t out = 0;

    // Single pass: tokenize into quoted regions, identifiers, and the rest.
    size_t i = 0;
    while (i < filter_len) {
        char c = filter[i];

        // Copy quoted regions verbatim: 'string literal' or "identifier".
        // Inside either, a doubled quote ('' or "") is an escape.
        if (c == '\'' || c == '"') {
            char quote = c;
            result[out++] = filter[i++];
            while (i < filter_len) {
                if (filter[i] == quote) {
                    result[out++] = filter[i++];
                    if (i < filter_len && filter[i] == quote) {
                        result[out++] = filter[i++];  // escaped quote — keep going
                        continue;
                    }
                    break;  // closing quote ends the region
                }
                result[out++] = filter[i++];
            }
            continue;
        }

        // Extract identifier token
        if (filter_is_ident_char(c)) {
            size_t start = i;
            while (i < filter_len && filter_is_ident_char(filter[i])) ++i;
            size_t token_len = i - start;

            // Skip rewriting when the token is already qualified (x.col)
            // or is the name of a function call (col(...)).
            bool qualified = (start > 0 && filter[start - 1] == '.');
            bool is_call = (i < filter_len && filter[i] == '(');

            if (!qualified && !is_call &&
                filter_is_column(&filter[start], token_len, columns, ncols)) {
                // Emit PREFIX."column_name"
                memcpy(&result[out], prefix, prefix_len); out += prefix_len;
                result[out++] = '.';
                result[out++] = '"';
                memcpy(&result[out], &filter[start], token_len); out += token_len;
                result[out++] = '"';
            } else {
                // Not a rewritable column — copy as-is
                memcpy(&result[out], &filter[start], token_len); out += token_len;
            }
            continue;
        }

        // Operators, whitespace, punctuation — copy through unchanged
        result[out++] = filter[i++];
    }

    result[out] = '\0';
    return result;
}
1882+
17971883int cloudsync_refill_metatable (cloudsync_context * data , const char * table_name ) {
17981884 cloudsync_table_context * table = table_lookup (data , table_name );
17991885 if (!table ) return DBRES_ERROR ;
1800-
1886+
18011887 dbvm_t * vm = NULL ;
18021888 int64_t db_version = cloudsync_dbversion_next (data , CLOUDSYNC_VALUE_NOTSET );
18031889
1890+ // Read row-level filter from settings (if any)
1891+ char filter_buf [2048 ];
1892+ int frc = dbutils_table_settings_get_value (data , table_name , "*" , "filter" , filter_buf , sizeof (filter_buf ));
1893+ const char * filter = (frc == DBRES_OK && filter_buf [0 ]) ? filter_buf : NULL ;
1894+
18041895 const char * schema = table -> schema ? table -> schema : "" ;
18051896 char * sql = sql_build_pk_collist_query (schema , table_name );
18061897 char * pkclause_identifiers = NULL ;
@@ -1810,18 +1901,22 @@ int cloudsync_refill_metatable (cloudsync_context *data, const char *table_name)
18101901 char * pkvalues_identifiers = (pkclause_identifiers ) ? pkclause_identifiers : "rowid" ;
18111902
18121903 // Use database-specific query builder to handle type differences in composite PKs
1813- sql = sql_build_insert_missing_pks_query (schema , table_name , pkvalues_identifiers , table -> base_ref , table -> meta_ref );
1904+ sql = sql_build_insert_missing_pks_query (schema , table_name , pkvalues_identifiers , table -> base_ref , table -> meta_ref , filter );
18141905 if (!sql ) {rc = DBRES_NOMEM ; goto finalize ;}
18151906 rc = database_exec (data , sql );
18161907 cloudsync_memory_free (sql );
18171908 if (rc != DBRES_OK ) goto finalize ;
1818-
1909+
18191910 // fill missing colums
18201911 // for each non-pk column:
18211912 // The new query does 1 encode per source row and one indexed NOT-EXISTS probe.
1822- // The old plan does many decodes per candidate and can’t use an index to rule out matches quickly—so it burns CPU and I/O.
1823-
1824- sql = cloudsync_memory_mprintf (SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL , pkvalues_identifiers , table -> base_ref , table -> meta_ref );
1913+ // The old plan does many decodes per candidate and can't use an index to rule out matches quickly—so it burns CPU and I/O.
1914+
1915+ if (filter ) {
1916+ sql = cloudsync_memory_mprintf (SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL_FILTERED , pkvalues_identifiers , table -> base_ref , filter , table -> meta_ref );
1917+ } else {
1918+ sql = cloudsync_memory_mprintf (SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL , pkvalues_identifiers , table -> base_ref , table -> meta_ref );
1919+ }
18251920 rc = databasevm_prepare (data , sql , (void * * )& vm , DBFLAG_PERSISTENT );
18261921 cloudsync_memory_free (sql );
18271922 if (rc != DBRES_OK ) goto finalize ;
@@ -2723,8 +2818,13 @@ int cloudsync_init_table (cloudsync_context *data, const char *table_name, const
27232818 // sync algo with table (unused in this version)
27242819 // cloudsync_sync_table_key(data, table_name, "*", CLOUDSYNC_KEY_ALGO, crdt_algo_name(algo_new));
27252820
2821+ // read row-level filter from settings (if any)
2822+ char init_filter_buf [2048 ];
2823+ int init_frc = dbutils_table_settings_get_value (data , table_name , "*" , "filter" , init_filter_buf , sizeof (init_filter_buf ));
2824+ const char * init_filter = (init_frc == DBRES_OK && init_filter_buf [0 ]) ? init_filter_buf : NULL ;
2825+
27262826 // check triggers
2727- rc = database_create_triggers (data , table_name , algo_new );
2827+ rc = database_create_triggers (data , table_name , algo_new , init_filter );
27282828 if (rc != DBRES_OK ) return cloudsync_set_error (data , "An error occurred while creating triggers" , DBRES_MISUSE );
27292829
27302830 // check meta-table
0 commit comments