@@ -133,6 +133,16 @@ namespace chatllm
             CHATLLM_THROW << "forward(ComputeContext *ctx, ggml::tensor *input, int n_past): not implemented";
             return NULL;
         }
+        virtual ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *hidden_states, ggml::tensor *_)
+        {
+            CHATLLM_THROW << "forward(ComputeContext *ctx, ggml::tensor *hidden_states, ggml::tensor *_): not implemented";
+            return NULL;
+        }
+        virtual ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *patches, int patches_per_row, ggml::tensor *text_input)
+        {
+            CHATLLM_THROW << "forward(ComputeContext *ctx, ggml::tensor *patches, int patches_per_row, ggml::tensor *text_input): not implemented";
+            return NULL;
+        }
         virtual void set_ctx(int n_ctx) { }
         virtual void shift_cache(int shift, int total) { }
 
@@ -220,8 +230,6 @@ namespace chatllm
     public:
         VisualEmbedding(InitContext *ctx, int num_embeddings, int embedding_dim)
             : Embedding(ctx, num_embeddings, embedding_dim) {}
-
-        virtual ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *patches, int patches_per_row, ggml::tensor *text_input) = 0;
     };
 
     class Linear : public Block
@@ -1848,7 +1856,7 @@ namespace chatllm
               norm(ctx, hidden_size)
         {}
 
-        ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *hidden_states, ggml::tensor *attention_output);
+        ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *hidden_states, ggml::tensor *attention_output) override;
 
     public:
        Linear dense;
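
The two forward() overloads added here are plain virtuals on Block with a throwing default, replacing the pure virtual that VisualEmbedding used to declare, so only blocks that actually consume image patches need to override them. A minimal sketch of such an override, assuming only the declarations in this diff; the PatchEmbedding class and its body are hypothetical and not part of this commit:

    class PatchEmbedding : public VisualEmbedding
    {
    public:
        PatchEmbedding(InitContext *ctx, int num_embeddings, int embedding_dim)
            : VisualEmbedding(ctx, num_embeddings, embedding_dim) {}

        // Override only the patch-based overload introduced on Block; the other
        // forward() overloads keep Block's "not implemented" default.
        ggml::tensor *forward(ComputeContext *ctx, ggml::tensor *patches,
                              int patches_per_row, ggml::tensor *text_input) override
        {
            (void)patches_per_row;   // layout hint, unused in this placeholder
            (void)patches;           // a real model would embed the patches here
            return text_input;       // placeholder: return the text embeddings unchanged
        }
    };

Note that overriding one forward() overload hides the remaining Block::forward overloads for calls made directly on the derived type; virtual dispatch through a Block pointer is unaffected, and a using-declaration (using Block::forward;) restores the hidden names if direct calls are needed.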