Skip to content

Commit b3eb6fb

Browse files
committed
Minor: do not add attention_size_swa for non-SWA model
1 parent b6c84af commit b3eb6fb

File tree

1 file changed

+3
-1
lines changed

1 file changed

+3
-1
lines changed

ggml/src/ggml-openvino/ggml-decoder.cpp

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -360,7 +360,9 @@ void GgmlOvDecoder::add_extra_inputs() {
360360
};
361361

362362
create_attention_size_input("attention_size", attention_size);
363-
create_attention_size_input("attention_size_swa", attention_size_swa);
363+
if (attention_size_swa != -1) {
364+
create_attention_size_input("attention_size_swa", attention_size_swa);
365+
}
364366
}
365367

366368
const ggml_tensor* GgmlOvDecoder::get_tensor_used_op(const ggml_tensor* tensor) const {

0 commit comments

Comments
 (0)