Commit 11cf91b

chore(ai): write example app for ai package
1 parent e8781b2 commit 11cf91b

File tree
  • tests/test-app/examples/ai

1 file changed: +328 -0 lines changed

tests/test-app/examples/ai/ai.js

Lines changed: 328 additions & 0 deletions
@@ -0,0 +1,328 @@
import React, { useState } from 'react';
import { AppRegistry, Button, View, Text, Pressable } from 'react-native';

import { getApp } from '@react-native-firebase/app';
import { getAI, getGenerativeModel, Schema } from '@react-native-firebase/ai';
import {
  PDF_BASE_64,
  POEM_BASE_64,
  VIDEO_BASE_64,
  IMAGE_BASE_64,
  EMOJI_BASE_64,
} from '../vertexai/base-64-media';

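// Row of pressable chips for choosing which media type feeds the multi-modal demo below.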
// eslint-disable-next-line react/prop-types
function OptionSelector({ selectedOption, setSelectedOption }) {
  const options = ['image', 'pdf', 'video', 'audio', 'emoji'];

  return (
    <View style={{ flexDirection: 'row', flexWrap: 'wrap', margin: 10 }}>
      {options.map(option => {
        const isSelected = selectedOption === option;
        return (
          <Pressable
            key={option}
            onPress={() => setSelectedOption(option)}
            style={{
              paddingVertical: 10,
              paddingHorizontal: 15,
              margin: 5,
              borderRadius: 8,
              borderWidth: 1,
              borderColor: isSelected ? '#007bff' : '#ccc',
              backgroundColor: isSelected ? '#007bff' : '#fff',
            }}
          >
            <Text style={{ color: isSelected ? '#fff' : '#000', fontSize: 16 }}>
              {option.toUpperCase()}
            </Text>
          </Pressable>
        );
      })}
    </View>
  );
}

function App() {
  const [selectedOption, setSelectedOption] = useState('image');
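  // Maps the selected option to its base64 payload, MIME type, and a suitable prompt.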
  const getMediaDetails = option => {
    switch (option) {
      case 'image':
        return { data: IMAGE_BASE_64.trim(), mimeType: 'image/jpeg', prompt: 'What can you see?' };
      case 'pdf':
        return {
          data: PDF_BASE_64.trim(),
          mimeType: 'application/pdf',
          prompt: 'What can you see?',
        };
      case 'video':
        return { data: VIDEO_BASE_64.trim(), mimeType: 'video/mp4', prompt: 'What can you see?' };
      case 'audio':
        return { data: POEM_BASE_64.trim(), mimeType: 'audio/mp3', prompt: 'What can you hear?' };
      case 'emoji':
        return { data: EMOJI_BASE_64.trim(), mimeType: 'image/png', prompt: 'What can you see?' };
      default:
        console.error('Invalid option selected');
        return null;
    }
  };

  return (
    <View>
      <View style={{ height: 90 }} />
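      {/* Text-only generation: send a single prompt and log the complete response. */}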
      <Button
        title="Generate Content"
        onPress={async () => {
          try {
            const app = getApp();
            const ai = getAI(app);
            const model = getGenerativeModel(ai, { model: 'gemini-1.5-flash' });

            const result = await model.generateContent('What is 2 + 2?');

            console.log('result', result.response.text());
          } catch (e) {
            console.error(e);
          }
        }}
      />
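      {/* Streaming generation: read incremental chunks from result.stream as they arrive. */}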
      <Button
        title="Generate Content Stream"
        onPress={async () => {
          try {
            const app = getApp();
            const ai = getAI(app);
            const model = getGenerativeModel(ai, { model: 'gemini-1.5-flash' });

            const result = await model.generateContentStream('Write me a short, funny rap');

            let text = '';
            for await (const chunk of result.stream) {
              const chunkText = chunk.text();
              console.log(chunkText);

              text += chunkText;
            }

            console.log('result', text);
          } catch (e) {
            console.error(e);
          }
        }}
      />
      <Text style={{ margin: 10, fontSize: 16 }}>Select a file type for multi-modal input:</Text>
      <OptionSelector selectedOption={selectedOption} setSelectedOption={setSelectedOption} />
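      {/* Multi-modal input: pair the text prompt with inline base64 data and its MIME type. */}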
      <Button
        title="Generate Content Stream multi-modal"
        onPress={async () => {
          try {
            const app = getApp();
            const ai = getAI(app);
            const model = getGenerativeModel(ai, { model: 'gemini-1.5-flash' });
            const mediaDetails = getMediaDetails(selectedOption);
            if (!mediaDetails) return;

            const { data, mimeType, prompt } = mediaDetails;

            // Call generateContentStream with the prompt and the selected media part
            const response = await model.generateContentStream([
              prompt,
              { inlineData: { mimeType, data } },
            ]);

            let text = '';
            for await (const chunk of response.stream) {
              text += chunk.text();
            }

            console.log('Generated text:', text);
          } catch (e) {
            console.error(e);
          }
        }}
      />
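      {/* Structured output: a responseSchema constrains the reply to JSON matching the Schema. */}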
      <Button
        title="Generate JSON Response"
        onPress={async () => {
          try {
            const app = getApp();
            const ai = getAI(app);
            const jsonSchema = Schema.object({
              properties: {
                characters: Schema.array({
                  items: Schema.object({
                    properties: {
                      name: Schema.string(),
                      accessory: Schema.string(),
                      age: Schema.number(),
                      species: Schema.string(),
                    },
                    optionalProperties: ['accessory'],
                  }),
                }),
              },
            });
            const model = getGenerativeModel(ai, {
              model: 'gemini-1.5-flash',
              generationConfig: {
                responseMimeType: 'application/json',
                responseSchema: jsonSchema,
              },
            });

            const prompt = "For use in a children's card game, generate 10 animal-based characters.";

            const result = await model.generateContent(prompt);
            console.log(result.response.text());
          } catch (e) {
            console.error(e);
          }
        }}
      />
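      {/* Multi-turn chat: startChat seeds the history, sendMessageStream streams the reply. */}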
      <Button
        title="Start Chat"
        onPress={async () => {
          try {
            const app = getApp();
            const ai = getAI(app);
            const model = getGenerativeModel(ai, { model: 'gemini-1.5-flash' });

            const chat = model.startChat({
              history: [
                {
                  role: 'user',
                  parts: [{ text: 'Hello, I have 2 dogs in my house.' }],
                },
                {
                  role: 'model',
                  parts: [{ text: 'Great to meet you. What would you like to know?' }],
                },
              ],
              generationConfig: {
                maxOutputTokens: 100,
              },
            });

            const msg = 'How many paws are in my house?';
            const result = await chat.sendMessageStream(msg);

            let text = '';
            for await (const chunk of result.stream) {
              const chunkText = chunk.text();
              text += chunkText;
            }
            console.log(text);
            // Retrieve the accumulated history, which now includes this exchange (unused here).
            chat.getHistory();
          } catch (e) {
            console.error(e);
          }
        }}
      />
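      {/* Token counting: measure a prompt's tokens and billable characters without generating. */}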
      <Button
        title="Count Tokens"
        onPress={async () => {
          try {
            const app = getApp();
            const ai = getAI(app);
            const model = getGenerativeModel(ai, { model: 'gemini-1.5-flash' });

            const result = await model.countTokens('What is 2 + 2?');

            console.log('totalBillableCharacters', result.totalBillableCharacters);
            console.log('totalTokens', result.totalTokens);
          } catch (e) {
            console.error(e);
          }
        }}
      />

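      {/* Function calling: declare a tool schema, let the model request a call,
          execute it locally, then return the result so the model can answer. */}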
      <Button
        title="Function Calling"
        onPress={async () => {
          // This function calls a hypothetical external API that returns
          // a collection of weather information for a given location on a given date.
          // `location` is an object of the form { city: string, state: string }
          async function fetchWeather({ location, date }) {
            // For demo purposes, this hypothetical response is hardcoded here in the expected format.
            return {
              temperature: 38,
              chancePrecipitation: '56%',
              cloudConditions: 'partlyCloudy',
            };
          }
          const fetchWeatherTool = {
            functionDeclarations: [
              {
                name: 'fetchWeather',
                description: 'Get the weather conditions for a specific city on a specific date',
                parameters: Schema.object({
                  properties: {
                    location: Schema.object({
                      description:
                        'The name of the city and its state for which to get ' +
                        'the weather. Only cities in the USA are supported.',
                      properties: {
                        city: Schema.string({
                          description: 'The city of the location.',
                        }),
                        state: Schema.string({
                          description: 'The US state of the location.',
                        }),
                      },
                    }),
                    date: Schema.string({
                      description:
                        'The date for which to get the weather. Date must be in the' +
                        ' format: YYYY-MM-DD.',
                    }),
                  },
                }),
              },
            ],
          };
          try {
            const app = getApp();
            const ai = getAI(app);
            const model = getGenerativeModel(ai, {
              model: 'gemini-1.5-flash',
              tools: fetchWeatherTool,
            });

            const chat = model.startChat();
            const prompt = 'What was the weather in Boston on October 17, 2024?';

            // Send the user's question (the prompt) to the model using multi-turn chat.
            let result = await chat.sendMessage(prompt);
            const functionCalls = result.response.functionCalls();
            let functionCall;
            let functionResult;
            // When the model responds with one or more function calls, invoke the function(s).
            if (functionCalls && functionCalls.length > 0) {
              for (const call of functionCalls) {
                if (call.name === 'fetchWeather') {
                  // Forward the structured input data prepared by the model
                  // to the hypothetical external API.
                  functionResult = await fetchWeather(call.args);
                  functionCall = call;
                }
              }
            }
            if (functionCall) {
              // Send the function's output back to the model so it can compose the final answer.
              result = await chat.sendMessage([
                {
                  functionResponse: {
                    name: functionCall.name, // "fetchWeather"
                    response: functionResult,
                  },
                },
              ]);
              console.log(result.response.text());
            }
          } catch (e) {
            console.error(e);
          }
        }}
      />
    </View>
  );
}

AppRegistry.registerComponent('testing', () => App);
