
Commit ab55511

Author: Judd
Commit message: fix warnings
Parent commit: a7383cd

File tree: 3 files changed (+13, −7 lines)


Makefile

Lines changed: 1 addition & 3 deletions

@@ -211,9 +211,7 @@ MK_CFLAGS += \
     -Werror=implicit-function-declaration
 
 MK_CXXFLAGS += \
-    $(WARN_FLAGS) \
-    -Wmissing-declarations \
-    -Wmissing-noreturn
+    $(WARN_FLAGS)
 
 ifeq ($(CHATLLM_FATAL_WARNINGS),1)
     MK_CFLAGS += -Werror
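
The commit message does not say which warnings these flags produced; a plausible trigger for -Wmissing-noreturn is the new throwing forward() stubs added in src/layers.h below, since that flag marks any function that can never return normally as a candidate for the [[noreturn]] attribute. A minimal standalone illustration (hypothetical code, not from the repository), compiled with g++ -Wmissing-noreturn:

    #include <stdexcept>

    // g++ -Wmissing-noreturn emits:
    //   warning: function might be candidate for attribute 'noreturn'
    // because this function always throws and never returns normally.
    void fail(const char *msg)
    {
        throw std::runtime_error(msg);
    }

    int main()
    {
        try { fail("example"); } catch (const std::exception &) {}
        return 0;
    }

Assuming CHATLLM_THROW raises an exception, the new stubs end in an unreachable return NULL, and virtual functions whose overrides do return a value cannot reasonably be annotated [[noreturn]], so dropping the flag is the pragmatic fix.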

src/layers.cpp

Lines changed: 1 addition & 1 deletion

@@ -999,7 +999,7 @@ namespace chatllm
 
 ggml::tensor *BaichuanSelfAttention::apply_pos_embedding_kq(ComputeContext *ctx, ggml::tensor *kq, int hidden_size, int qlen, ggml::tensor *past) const
 {
-    return ggml::map_custom1(ctx, kq, ggml_compute_forward_custom_alibi, GGML_N_TASKS_MAX, (void *)&alibi);
+    return ggml::map_custom1(ctx, kq, ggml_compute_forward_custom_alibi, GGML_N_TASKS_MAX, const_cast<void *>((const void *)&alibi));
 }
 
 QWenSelfAttention::QWenSelfAttention(InitContext *ctx, int hidden_size, int num_attention_heads, int max_length)
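
apply_pos_embedding_kq is a const member function, so &alibi inside it is a pointer to const; the old C-style (void *) cast stripped that const silently, which cast-qualifier warnings object to. The replacement performs the same conversion but spells the const removal out. A condensed sketch of the pattern with hypothetical names (not the chatllm API):

    // A C-style hook that takes a non-const void *userdata:
    static void run_hook(void (*fn)(void *), void *userdata) { fn(userdata); }

    struct AlibiParams { float slope = 1.0f; };

    struct Attention
    {
        AlibiParams alibi;

        void apply() const
        {
            // In a const member function, &alibi has type `const AlibiParams *`.
            // A plain (void *) cast would drop the const silently; spelling
            // out both steps keeps the intent visible:
            run_hook([](void *p) {
                         // the hook only reads through the pointer
                         float s = static_cast<const AlibiParams *>(p)->slope;
                         (void)s;
                     },
                     const_cast<void *>(static_cast<const void *>(&alibi)));
        }
    };

The const_cast is safe as long as the callback only reads through the pointer; writing through it to an object that was originally declared const would be undefined behavior.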

src/layers.h

Lines changed: 11 additions & 3 deletions

@@ -133,6 +133,16 @@ namespace chatllm
         CHATLLM_THROW << "forward(ComputeContext *ctx, ggml::tensor *input, int n_past): not implemented";
         return NULL;
     }
+    virtual ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *hidden_states, ggml::tensor *_)
+    {
+        CHATLLM_THROW << "forward(ComputeContext *ctx, ggml::tensor *hidden_states, ggml::tensor *_): not implemented";
+        return NULL;
+    }
+    virtual ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *patches, int patches_per_row, ggml::tensor *text_input)
+    {
+        CHATLLM_THROW << "forward(ComputeContext *ctx, ggml::tensor *patches, int patches_per_row, ggml::tensor *text_input): not implemented";
+        return NULL;
+    }
     virtual void set_ctx(int n_ctx) { }
     virtual void shift_cache(int shift, int total) { }

@@ -220,8 +230,6 @@ namespace chatllm
 public:
     VisualEmbedding(InitContext *ctx, int num_embeddings, int embedding_dim)
         : Embedding(ctx, num_embeddings, embedding_dim) {}
-
-    virtual ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *patches, int patches_per_row, ggml::tensor *text_input) = 0;
 };

 class Linear : public Block

@@ -1848,7 +1856,7 @@ namespace chatllm
     norm(ctx, hidden_size)
     {}

-    ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *hidden_states, ggml::tensor *attention_output);
+    ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *hidden_states, ggml::tensor *attention_output) override;

 public:
     Linear dense;
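
Taken together, these hunks hoist the overloads into the Block base class as throwing defaults: the two-tensor and patches variants of forward() now exist on every Block and fail loudly if unimplemented, instead of the patches variant living only as a pure virtual on VisualEmbedding, and the declaration in the last hunk can now carry override so the compiler verifies its signature against the base. A condensed sketch of the pattern with stand-in types (not the real chatllm declarations):

    #include <stdexcept>

    struct Tensor;  // stand-in for ggml::tensor
    struct Ctx;     // stand-in for ComputeContext

    struct Block
    {
        virtual ~Block() = default;

        // Every overload gets a throwing default instead of being pure
        // virtual in one subclass: subclasses override only what they
        // support, and an unsupported call fails loudly at run time.
        virtual Tensor *forward(Ctx *, Tensor * /*input*/, int /*n_past*/)
        { throw std::logic_error("forward(input, n_past): not implemented"); }

        virtual Tensor *forward(Ctx *, Tensor * /*hidden*/, Tensor * /*attn*/)
        { throw std::logic_error("forward(hidden, attn): not implemented"); }
    };

    struct OutputBlock : Block   // hypothetical subclass
    {
        // `override` makes the compiler check this matches a base overload:
        Tensor *forward(Ctx *, Tensor *hidden, Tensor *) override
        { return hidden; }
    };

One trade-off worth noting: making VisualEmbedding's forward non-pure means the class is no longer abstract on that account, so a forgotten override now surfaces at run time rather than at compile time.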
