Commit 4b12ceb

create GenerateText snippets

1 parent 66ed753 commit 4b12ceb
File tree

4 files changed: +69 -0 lines changed

.editorconfig

Lines changed: 1 addition & 0 deletions

@@ -1,2 +1,3 @@
 [*.{java,kt}]
 max_line_length = 120
+indent_size = 2
Lines changed: 19 additions & 0 deletions

@@ -0,0 +1,19 @@
+package com.google.firebase.example.ai.googleai
+
+import com.google.firebase.Firebase
+import com.google.firebase.ai.ai
+import com.google.firebase.ai.type.GenerativeBackend
+
+class FirebaseAILogic {
+
+  private fun initialization() {
+    // [START initialize_googleai_and_model]
+    // Initialize the Gemini Developer API backend service
+    // Create a `GenerativeModel` instance with a model that supports your use case
+    val model = Firebase.ai(backend = GenerativeBackend.googleAI())
+      .generativeModel("gemini-2.0-flash")
+    // [END initialize_googleai_and_model]
+  }
+
+
+}
Lines changed: 31 additions & 0 deletions

@@ -0,0 +1,31 @@
+package com.google.firebase.example.ai.send_requests
+
+import com.google.firebase.ai.GenerativeModel
+
+class GenerateText {
+
+  private suspend fun textOnlyNonStreaming(model: GenerativeModel) {
+    // [START text_only_non_streaming]
+    // Provide a prompt that contains text
+    val prompt = "Write a story about a magic backpack."
+
+    // To generate text output, call generateContent with the text input
+    val response = model.generateContent(prompt)
+    print(response.text)
+    // [END text_only_non_streaming]
+  }
+
+  private suspend fun textOnlyStreaming(model: GenerativeModel) {
+    // [START text_only_streaming]
+    // Provide a prompt that includes only text
+    val prompt = "Write a story about a magic backpack."
+
+    // To stream generated text output, call generateContentStream and pass in the prompt
+    var response = ""
+    model.generateContentStream(prompt).collect { chunk ->
+      print(chunk.text)
+      response += chunk.text
+    }
+    // [END text_only_streaming]
+  }
+}
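
These GenerateText snippets expect a GenerativeModel produced by one of the initialization snippets in this commit. A minimal sketch of how the pieces might be wired together from a coroutine, assuming the Gemini Developer API backend; the main entry point shown here is hypothetical and not part of this commit:

package com.google.firebase.example.ai

import com.google.firebase.Firebase
import com.google.firebase.ai.ai
import com.google.firebase.ai.type.GenerativeBackend
import kotlinx.coroutines.runBlocking

// Hypothetical caller (not part of this commit) combining the initialization
// snippet with the GenerateText snippets.
fun main() = runBlocking {
  // Create a model via the Gemini Developer API backend,
  // as in the initialize_googleai_and_model snippet
  val model = Firebase.ai(backend = GenerativeBackend.googleAI())
    .generativeModel("gemini-2.0-flash")

  // Non-streaming generation, as in text_only_non_streaming
  val response = model.generateContent("Write a story about a magic backpack.")
  println(response.text)

  // Streaming generation, as in text_only_streaming
  model.generateContentStream("Write a story about a magic backpack.").collect { chunk ->
    print(chunk.text)
  }
}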
Lines changed: 18 additions & 0 deletions

@@ -0,0 +1,18 @@
+package com.google.firebase.example.ai.vertexai
+
+import com.google.firebase.Firebase
+import com.google.firebase.ai.ai
+import com.google.firebase.ai.type.GenerativeBackend
+
+class FirebaseAILogic {
+
+  private fun initialization() {
+    // [START initialize_vertexai_and_model]
+    // Initialize the Vertex AI Gemini API backend service
+    // Create a `GenerativeModel` instance with a model that supports your use case
+    val model = Firebase.ai(backend = GenerativeBackend.vertexAI())
+      .generativeModel("gemini-2.0-flash")
+    // [END initialize_vertexai_and_model]
+  }
+
+}
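
All three Kotlin files import from com.google.firebase.ai, the Firebase AI Logic SDK, so the snippets assume that dependency is already declared in the app module. A minimal sketch of the assumed Gradle setup (not part of this commit; the BoM version shown is an assumption, use the current release):

// Module-level build.gradle.kts -- assumed setup, not part of this commit
dependencies {
  // Firebase BoM keeps Firebase library versions in sync
  // (version is an assumption; substitute the current release)
  implementation(platform("com.google.firebase:firebase-bom:33.13.0"))

  // Firebase AI Logic SDK, which provides com.google.firebase.ai.*
  implementation("com.google.firebase:firebase-ai")
}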
