// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// ignore_for_file: avoid_print, unused_local_variable

import 'dart:io';

import 'package:firebase_core/firebase_core.dart';
import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:firebase_snippets_app/snippets/snippet_base.dart';

class VertexAISnippets extends DocSnippet {
  late final GenerativeModel model;

  @override
  void runAll() {
    initializeModel();
    configureModel();
    safetySetting();
    multiSafetySetting();
    textGenTextOnlyPromptStream();
    textGenTextOnlyPrompt();
    textGenMultimodalOneImagePromptStream();
    textGenMultimodalOneImagePrompt();
    textGenMultiModalMultiImagePromptStreaming();
    textGenMultiModalMultiImagePrompt();
    textGenMultiModalVideoPromptStreaming();
    textGenMultiModalVideoPrompt();
    countTokensText();
    countTokensTextImage();
    chatStream();
    chat();
    setSystemInstructions();
  }

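  // The snippet methods below are async but intentionally fire-and-forget
  // here; each one is an independent documentation extract rather than a
  // step in a sequential program.
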
  void initializeModel() async {
    // [START initialize_model]
    // Initialize FirebaseApp
    await Firebase.initializeApp();
    // Initialize the {{vertexai}} service and the generative model
    // Specify a model that supports your use case
    // Gemini 1.5 models are versatile and can be used with all API capabilities
    final model = FirebaseVertexAI.instance.generativeModel(
        model: '{{generic_model_name_initialization}}');
    // [END initialize_model]
  }

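  // In an app configured with the FlutterFire CLI, Firebase.initializeApp is
  // typically called with `options: DefaultFirebaseOptions.currentPlatform`
  // (from the generated firebase_options.dart); the bare call above assumes
  // platform-level config files are already in place.
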
  void configureModel() {
    // [START configure_model]
    // ...

    final generationConfig = GenerationConfig(
      maxOutputTokens: 200,
      stopSequences: ["red"],
      temperature: 0.9,
      topP: 0.1,
      topK: 16,
    );
    final model = FirebaseVertexAI.instance.generativeModel(
      model: '{{generic_model_name_initialization}}',
      generationConfig: generationConfig,
    );

    // ...
    // [END configure_model]
  }

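  // A quick, informal reference for the GenerationConfig knobs used above:
  // maxOutputTokens caps response length; stopSequences halts generation when
  // a listed string appears; temperature trades determinism (low) for variety
  // (high); topP and topK restrict sampling to the most likely tokens.
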
  void safetySetting() {
    // [START safety_setting]
    // ...

    final safetySettings = [
      SafetySetting(HarmCategory.harassment, HarmBlockThreshold.high)
    ];
    final model = FirebaseVertexAI.instance.generativeModel(
      model: '{{generic_model_name_initialization}}',
      safetySettings: safetySettings,
    );

    // ...
    // [END safety_setting]
  }

  void multiSafetySetting() {
    // [START multi_safety_setting]
    // ...

    final safetySettings = [
      SafetySetting(HarmCategory.harassment, HarmBlockThreshold.high),
      SafetySetting(HarmCategory.hateSpeech, HarmBlockThreshold.high),
    ];
    final model = FirebaseVertexAI.instance.generativeModel(
      model: '{{generic_model_name_initialization}}',
      safetySettings: safetySettings,
    );

    // ...
    // [END multi_safety_setting]
  }

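  // Settings for the SDK's other harm categories (sexually explicit and
  // dangerous content, assuming the usual category set) can be appended to
  // the same list, one SafetySetting per category.
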
  void textGenTextOnlyPromptStream() async {
    // [START text_gen_text_only_prompt_streaming]
    // Provide a prompt that contains text
    final prompt = [Content.text('Write a story about a magic backpack.')];

    // To stream generated text output, call generateContentStream with the text input
    final response = model.generateContentStream(prompt);
    await for (final chunk in response) {
      print(chunk.text);
    }
    // [END text_gen_text_only_prompt_streaming]
  }

  void textGenTextOnlyPrompt() async {
    // [START text_gen_text_only_prompt]
    // Provide a prompt that contains text
    final prompt = [Content.text('Write a story about a magic backpack.')];

    // To generate text output, call generateContent with the text input
    final response = await model.generateContent(prompt);
    print(response.text);
    // [END text_gen_text_only_prompt]
  }

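  // A minimal sketch (not one of the published snippets) of adding error
  // handling around a call; a generic catch is shown because the exact
  // exception types surfaced by the SDK are not assumed here.
  void textGenWithErrorHandling() async {
    final prompt = [Content.text('Write a story about a magic backpack.')];
    try {
      final response = await model.generateContent(prompt);
      print(response.text);
    } catch (e) {
      // Network failures, blocked prompts, etc. surface here.
      print('Generation failed: $e');
    }
  }
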
  void textGenMultimodalOneImagePromptStream() async {
    // [START text_gen_multimodal_one_image_prompt_streaming]
    // Provide a text prompt to include with the image
    final prompt = TextPart("What's in the picture?");
    // Prepare images for input
    final image = await File('image0.jpg').readAsBytes();
    final imagePart = DataPart('image/jpeg', image);

    // To stream generated text output, call generateContentStream with the text and image
    final response = model.generateContentStream([
      Content.multi([prompt, imagePart])
    ]);
    await for (final chunk in response) {
      print(chunk.text);
    }
    // [END text_gen_multimodal_one_image_prompt_streaming]
  }

  void textGenMultimodalOneImagePrompt() async {
    // [START text_gen_multimodal_one_image_prompt]
    // Provide a text prompt to include with the image
    final prompt = TextPart("What's in the picture?");
    // Prepare images for input
    final image = await File('image0.jpg').readAsBytes();
    final imagePart = DataPart('image/jpeg', image);

    // To generate text output, call generateContent with the text and image
    final response = await model.generateContent([
      Content.multi([prompt, imagePart])
    ]);
    print(response.text);
    // [END text_gen_multimodal_one_image_prompt]
  }

  void textGenMultiModalMultiImagePromptStreaming() async {
    // [START text_gen_multimodal_multi_image_prompt_streaming]
    final (firstImage, secondImage) = await (
      File('image0.jpg').readAsBytes(),
      File('image1.jpg').readAsBytes()
    ).wait;
    // Provide a text prompt to include with the images
    final prompt = TextPart("What's different between these pictures?");
    // Prepare images for input
    final imageParts = [
      DataPart('image/jpeg', firstImage),
      DataPart('image/jpeg', secondImage),
    ];

    // To stream generated text output, call generateContentStream with the text and images
    final response = model.generateContentStream([
      Content.multi([prompt, ...imageParts])
    ]);
    await for (final chunk in response) {
      print(chunk.text);
    }
    // [END text_gen_multimodal_multi_image_prompt_streaming]
  }

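  // The `(futureA, futureB).wait` pattern above relies on the record
  // extensions on futures introduced with Dart 3 to read both files in
  // parallel; awaiting each readAsBytes call sequentially works as well.
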
  void textGenMultiModalMultiImagePrompt() async {
    // [START text_gen_multimodal_multi_image_prompt]
    final (firstImage, secondImage) = await (
      File('image0.jpg').readAsBytes(),
      File('image1.jpg').readAsBytes()
    ).wait;
    // Provide a text prompt to include with the images
    final prompt = TextPart("What's different between these pictures?");
    // Prepare images for input
    final imageParts = [
      DataPart('image/jpeg', firstImage),
      DataPart('image/jpeg', secondImage),
    ];

    // To generate text output, call generateContent with the text and images
    final response = await model.generateContent([
      Content.multi([prompt, ...imageParts])
    ]);
    print(response.text);
    // [END text_gen_multimodal_multi_image_prompt]
  }

  void textGenMultiModalVideoPromptStreaming() async {
    // [START text_gen_multimodal_video_prompt_streaming]
    // Provide a text prompt to include with the video
    final prompt = TextPart("What's in the video?");

    // Prepare video for input
    final video = await File('video0.mp4').readAsBytes();

    // Provide the video as `Data` with the appropriate mimetype
    final videoPart = DataPart('video/mp4', video);

    // To stream generated text output, call generateContentStream with the text and video
    final response = model.generateContentStream([
      Content.multi([prompt, videoPart])
    ]);
    await for (final chunk in response) {
      print(chunk.text);
    }
    // [END text_gen_multimodal_video_prompt_streaming]
  }

  void textGenMultiModalVideoPrompt() async {
    // [START text_gen_multimodal_video_prompt]
    // Provide a text prompt to include with the video
    final prompt = TextPart("What's in the video?");

    // Prepare video for input
    final video = await File('video0.mp4').readAsBytes();

    // Provide the video as `Data` with the appropriate mimetype
    final videoPart = DataPart('video/mp4', video);

    // To generate text output, call generateContent with the text and video
    final response = await model.generateContent([
      Content.multi([prompt, videoPart])
    ]);
    print(response.text);
    // [END text_gen_multimodal_video_prompt]
  }

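  // DataPart sends the raw bytes inline with the request, so it only suits
  // media that fits within the request size limit; for larger videos, a
  // file-reference part pointing at Cloud Storage (FileData, if your SDK
  // version provides it) avoids inlining the bytes.
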
  void countTokensText() async {
    // [START count_tokens_text]
    // Provide a prompt that contains text
    final prompt = [Content.text('Write a story about a magic backpack.')];

    // Count tokens and billable characters before calling generateContent
    final tokenCount = await model.countTokens(prompt);
    print('Token count: ${tokenCount.totalTokens}, billable characters: ${tokenCount.totalBillableCharacters}');

    // To generate text output, call generateContent with the text input
    final response = await model.generateContent(prompt);
    print(response.text);
    // [END count_tokens_text]
  }

  void countTokensTextImage() async {
    // [START count_tokens_text_image]
    // Provide a text prompt to include with the image
    final prompt = TextPart("What's in the picture?");
    // Prepare image for input
    final image = await File('image0.jpg').readAsBytes();
    final imagePart = DataPart('image/jpeg', image);

    // Count tokens and billable characters before calling generateContent
    final tokenCount = await model.countTokens([
      Content.multi([prompt, imagePart])
    ]);
    print('Token count: ${tokenCount.totalTokens}, billable characters: ${tokenCount.totalBillableCharacters}');

    // To generate text output, call generateContent with the text and image
    final response = await model.generateContent([
      Content.multi([prompt, imagePart])
    ]);
    print(response.text);
    // [END count_tokens_text_image]
  }

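  // Counting tokens before generating is a cheap way to check a prompt
  // against the model's context window and to estimate cost, since images
  // and video contribute tokens in addition to the text.
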
  void chatStream() async {
    // [START chat_streaming]
    final chat = model.startChat();
    // Provide a prompt that contains text
    final prompt = Content.text('Write a story about a magic backpack.');

    final response = chat.sendMessageStream(prompt);
    await for (final chunk in response) {
      print(chunk.text);
    }
    // [END chat_streaming]
  }

  void chat() async {
    // [START chat]
    final chat = model.startChat();
    // Provide a prompt that contains text
    final prompt = Content.text('Write a story about a magic backpack.');

    final response = await chat.sendMessage(prompt);
    print(response.text);
    // [END chat]
  }

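  // A minimal multi-turn sketch (not one of the published snippets): the
  // chat session returned by startChat keeps the conversation history, so
  // each sendMessage call is answered in the context of earlier turns.
  void chatMultiTurn() async {
    final chat = model.startChat();
    final first = await chat.sendMessage(
        Content.text('Open a story about a magic backpack.'));
    print(first.text);
    // The follow-up is interpreted against the first exchange above.
    final second = await chat.sendMessage(Content.text('Continue the story.'));
    print(second.text);
  }
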
  void setSystemInstructions() async {
    // [START system_instructions_text]
    await Firebase.initializeApp();
    // Initialize the Vertex AI service and the generative model
    // Specify a model that supports system instructions, like a Gemini 1.5 model
    final model = FirebaseVertexAI.instance.generativeModel(
      model: 'gemini-1.5-flash-preview-0514',
      systemInstruction: Content.system('You are a cat. Your name is Neko.'),
    );
    // [END system_instructions_text]
  }
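
  // Note: a system instruction set this way applies to every subsequent
  // request made through this model instance, including chat sessions
  // started from it.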
}