@@ -179,3 +179,122 @@ def test_ignore_pickle_load_exceptions():
     # TODO: parameterized test with patched pickle.load that raises the
     # various allowable exceptions from _read_buckets_cache_file
     pass
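+    # A minimal sketch of that TODO (hypothetical, not wired up yet): patch
+    # pickle.load to raise each tolerated exception and assert the helper
+    # degrades to None instead of propagating. The exact exception list should
+    # be taken from _read_buckets_cache_file itself before implementing this.
+    #
+    #     @pytest.mark.parametrize("exc", [EOFError, pickle.UnpicklingError])
+    #     def test_pickle_load_exc(exc, tmp_path):
+    #         cache_file = tmp_path / "buckets.cache"
+    #         cache_file.write_bytes(b"\x80")  # any on-disk bytes will do
+    #         with unittest.mock.patch("pickle.load", side_effect=exc):
+    #             assert s3fs._read_buckets_cache_file(str(cache_file)) is None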
+
+
+@pytest.mark.skip_on_fips_enabled_platform
+def test_prune_deleted_files_multiple_envs_per_bucket(tmp_path, bucket, s3):
+    """Test that _prune_deleted_files works correctly with multiple environments per bucket."""
+
+    # Create test files in S3
+    keys = {
+        "base/test1.sls": {"content": "test1 content"},
+        "base/test2.sls": {"content": "test2 content"},
+        "dev/test3.sls": {"content": "test3 content"},
+    }
+    make_keys(bucket, s3, keys)
+
+    # Configure for multiple-environments-per-bucket mode
+    opts = {
+        "cachedir": tmp_path,
+        "s3.buckets": [bucket],  # List mode = multiple environments per bucket
+        "s3.location": "us-east-1",
+        "s3.s3_cache_expire": -1,
+    }
+    utils = {"s3.query": salt.utils.s3.query}
+
+    # Update the module configuration
+    s3fs.__opts__ = opts
+    s3fs.__utils__ = utils
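+    # NOTE: in production Salt's loader injects __opts__ and __utils__ into the
+    # fileserver module; assigning them directly here stands in for the loader.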
+
+    # Initial update to populate cache
+    s3fs.update()
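+    # An s3.s3_cache_expire of -1 is assumed to mark the cached metadata as
+    # always stale, so update() re-queries S3 instead of trusting the cache.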
+
+    # Verify files are cached
+    for key in keys:
+        env, filename = key.split("/", 1)
+        cache_file = s3fs._get_cached_file_name(bucket, env, filename)
+        assert os.path.exists(cache_file)
+
+    # Delete one file from S3
+    s3.delete_object(Bucket=bucket, Key="base/test1.sls")
+    del keys["base/test1.sls"]
+
+    # Update metadata to reflect the deletion
+    # This simulates what would happen after an S3 metadata refresh
+    metadata = {
+        "base": [{bucket: [{"Key": "base/test2.sls"}]}],
+        "dev": [{bucket: [{"Key": "dev/test3.sls"}]}],
+    }
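+    # Shape note: metadata maps each saltenv to a list of per-bucket dicts,
+    # each holding S3 list-style key entries ({"Key": ...}); only files still
+    # listed here should survive the prune below.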
+
+    # Call _prune_deleted_files directly
+    s3fs._prune_deleted_files(metadata)
+
+    # Verify that the deleted file was removed from cache
+    deleted_cache_file = s3fs._get_cached_file_name(bucket, "base", "test1.sls")
+    assert not os.path.exists(deleted_cache_file)
+
+    # Verify that remaining files still exist
+    remaining_cache_file = s3fs._get_cached_file_name(bucket, "base", "test2.sls")
+    assert os.path.exists(remaining_cache_file)
+
+    dev_cache_file = s3fs._get_cached_file_name(bucket, "dev", "test3.sls")
+    assert os.path.exists(dev_cache_file)
+
+
+@pytest.mark.skip_on_fips_enabled_platform
+def test_prune_deleted_files_single_env_per_bucket(tmp_path, bucket, s3):
+    """Test that _prune_deleted_files works correctly with a single environment per bucket."""
+
+    # Create test files in S3
+    keys = {
+        "test1.sls": {"content": "test1 content"},
+        "test2.sls": {"content": "test2 content"},
+    }
+    make_keys(bucket, s3, keys)
+
+    # Configure for single-environment-per-bucket mode
+    opts = {
+        "cachedir": tmp_path,
+        "s3.buckets": {"base": [bucket]},  # Dict mode = single environment per bucket
+        "s3.location": "us-east-1",
+        "s3.s3_cache_expire": -1,
+    }
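+    # In dict mode the whole bucket is pinned to one saltenv ("base"), so keys
+    # live at the bucket root with no env/ prefix, unlike the test above.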
+    utils = {"s3.query": salt.utils.s3.query}
+
+    # Update the module configuration
+    s3fs.__opts__ = opts
+    s3fs.__utils__ = utils
+
+    # Initial update to populate cache
+    s3fs.update()
+
+    # Verify files are cached
+    for key in keys:
+        cache_file = s3fs._get_cached_file_name(bucket, "base", key)
+        assert os.path.exists(cache_file)
+
+    # Delete one file from S3
+    s3.delete_object(Bucket=bucket, Key="test1.sls")
+    del keys["test1.sls"]
+
+    # Update metadata to reflect the deletion
+    metadata = {"base": [{bucket: [{"Key": "test2.sls"}]}]}
+
+    # Call _prune_deleted_files directly
+    s3fs._prune_deleted_files(metadata)
+
+    # Verify that the deleted file was removed from cache
+    deleted_cache_file = s3fs._get_cached_file_name(bucket, "base", "test1.sls")
+    assert not os.path.exists(deleted_cache_file)
+
+    # Verify that the remaining file still exists
+    remaining_cache_file = s3fs._get_cached_file_name(bucket, "base", "test2.sls")
+    assert os.path.exists(remaining_cache_file)