@@ -681,18 +681,25 @@ __STATIC_INLINE__ void sd_tiling(ggml_tensor* input, ggml_tensor* output, const
     int tile_overlap_y     = (int32_t)(tile_size * tile_overlap_factor_y);
     int non_tile_overlap_y = tile_size - tile_overlap_y;

-    int input_tile_size  = tile_size;
-    int output_tile_size = tile_size;
+    int tile_size_x = tile_size < small_width ? tile_size : small_width;
+    int tile_size_y = tile_size < small_height ? tile_size : small_height;
+
+    int input_tile_size_x  = tile_size_x;
+    int input_tile_size_y  = tile_size_y;
+    int output_tile_size_x = tile_size_x;
+    int output_tile_size_y = tile_size_y;

     if (big_out) {
-        output_tile_size *= scale;
+        output_tile_size_x *= scale;
+        output_tile_size_y *= scale;
     } else {
-        input_tile_size *= scale;
+        input_tile_size_x *= scale;
+        input_tile_size_y *= scale;
     }

     struct ggml_init_params params = {};
-    params.mem_size += input_tile_size * input_tile_size * input->ne[2] * sizeof(float);     // input chunk
-    params.mem_size += output_tile_size * output_tile_size * output->ne[2] * sizeof(float);  // output chunk
+    params.mem_size += input_tile_size_x * input_tile_size_y * input->ne[2] * sizeof(float);     // input chunk
+    params.mem_size += output_tile_size_x * output_tile_size_y * output->ne[2] * sizeof(float);  // output chunk
     params.mem_size += 3 * ggml_tensor_overhead();
     params.mem_buffer = NULL;
     params.no_alloc   = false;
@@ -707,19 +714,19 @@ __STATIC_INLINE__ void sd_tiling(ggml_tensor* input, ggml_tensor* output, const
     }

     // tiling
-    ggml_tensor* input_tile  = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, input_tile_size, input_tile_size, input->ne[2], 1);
-    ggml_tensor* output_tile = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, output_tile_size, output_tile_size, output->ne[2], 1);
-    int num_tiles            = num_tiles_x * num_tiles_y;
+    ggml_tensor* input_tile  = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, input_tile_size_x, input_tile_size_y, input->ne[2], 1);
+    ggml_tensor* output_tile = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, output_tile_size_x, output_tile_size_y, output->ne[2], 1);
+    int num_tiles            = num_tiles_x * num_tiles_y;
     LOG_DEBUG("processing %i tiles", num_tiles);
     pretty_progress(0, num_tiles, 0.0f);
     int tile_count = 1;
     bool last_y = false, last_x = false;
     float last_time = 0.0f;
     for (int y = 0; y < small_height && !last_y; y += non_tile_overlap_y) {
         int dy = 0;
-        if (y + tile_size >= small_height) {
+        if (y + tile_size_y >= small_height) {
             int _y = y;
-            y      = small_height - tile_size;
+            y      = small_height - tile_size_y;
             dy     = _y - y;
             if (big_out) {
                 dy *= scale;
@@ -728,9 +735,9 @@ __STATIC_INLINE__ void sd_tiling(ggml_tensor* input, ggml_tensor* output, const
         }
         for (int x = 0; x < small_width && !last_x; x += non_tile_overlap_x) {
             int dx = 0;
-            if (x + tile_size >= small_width) {
+            if (x + tile_size_x >= small_width) {
                 int _x = x;
-                x      = small_width - tile_size;
+                x      = small_width - tile_size_x;
                 dx     = _x - x;
                 if (big_out) {
                     dx *= scale;
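For illustration only: the hunks above clamp the tile extent per axis to the latent's width/height and back-shift the last tile so it stays inside the image. The sketch below reproduces that pattern in isolation under stated assumptions; `tile_origins`, `extent`, and `main` are hypothetical scaffolding for this example, not part of `sd_tiling`.

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Sketch of the per-axis tiling pattern (not the library code): the stride
// comes from the requested tile size and overlap factor, the tile extent is
// clamped to the image extent (the tile_size_x / tile_size_y idea in the
// diff), and the last tile is shifted back so it never runs past the edge.
static std::vector<int> tile_origins(int extent, int tile_size, float overlap_factor) {
    int overlap  = (int)(tile_size * overlap_factor);
    int stride   = tile_size - overlap;
    int tile_ext = std::min(tile_size, extent);  // per-axis clamp, as in the diff

    std::vector<int> origins;
    bool last = false;
    for (int pos = 0; pos < extent && !last; pos += stride) {
        if (pos + tile_ext >= extent) {  // would cross the edge
            pos  = extent - tile_ext;    // back-shift the final tile
            last = true;
        }
        origins.push_back(pos);
    }
    return origins;
}

int main() {
    // A 20-wide latent with a requested 32-wide tile: without the clamp the
    // single tile would be wider than the image; with it, one 20-wide tile fits.
    for (int origin : tile_origins(20, 32, 0.5f)) {
        printf("x origin = %d\n", origin);
    }
    return 0;
}
```

The point of the clamp is visible in the pre-change lines: `y = small_height - tile_size` (and the x counterpart) goes negative whenever the image is smaller than the tile, whereas the clamped `tile_size_y` keeps the back-shifted origin at or above zero.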