@@ -17,140 +17,148 @@
 using System.Collections.Generic;
 using Firebase.AI.Internal;
 
-namespace Firebase.AI {
+namespace Firebase.AI
+{
+  /// <summary>
+  /// Represents the reason why the model stopped generating content.
+  /// </summary>
+  public enum FinishReason
+  {
+    /// <summary>
+    /// A new and not yet supported value.
+    /// </summary>
+    Unknown = 0,
+    /// <summary>
+    /// Natural stop point of the model or provided stop sequence.
+    /// </summary>
+    Stop,
+    /// <summary>
+    /// The maximum number of tokens as specified in the request was reached.
+    /// </summary>
+    MaxTokens,
+    /// <summary>
+    /// The token generation was stopped because the response was flagged for safety reasons.
+    /// </summary>
+    Safety,
+    /// <summary>
+    /// The token generation was stopped because the response was flagged for unauthorized citations.
+    /// </summary>
+    Recitation,
+    /// <summary>
+    /// All other reasons that stopped token generation.
+    /// </summary>
+    Other,
+    /// <summary>
+    /// Token generation was stopped because the response contained forbidden terms.
+    /// </summary>
+    Blocklist,
+    /// <summary>
+    /// Token generation was stopped because the response contained potentially prohibited content.
+    /// </summary>
+    ProhibitedContent,
+    /// <summary>
+    /// Token generation was stopped because of Sensitive Personally Identifiable Information (SPII).
+    /// </summary>
+    SPII,
+    /// <summary>
+    /// Token generation was stopped because the function call generated by the model was invalid.
+    /// </summary>
+    MalformedFunctionCall,
+  }
 
-/// <summary>
-/// Represents the reason why the model stopped generating content.
-/// </summary>
-public enum FinishReason {
-  /// <summary>
-  /// A new and not yet supported value.
-  /// </summary>
-  Unknown = 0,
-  /// <summary>
-  /// Natural stop point of the model or provided stop sequence.
-  /// </summary>
-  Stop,
-  /// <summary>
-  /// The maximum number of tokens as specified in the request was reached.
-  /// </summary>
-  MaxTokens,
-  /// <summary>
-  /// The token generation was stopped because the response was flagged for safety reasons.
-  /// </summary>
-  Safety,
-  /// <summary>
-  /// The token generation was stopped because the response was flagged for unauthorized citations.
-  /// </summary>
-  Recitation,
   /// <summary>
-  /// All other reasons that stopped token generation.
+  /// A struct representing a possible reply to a content generation prompt.
+  /// Each content generation prompt may produce multiple candidate responses.
   /// </summary>
-  Other,
-  /// <summary>
-  /// Token generation was stopped because the response contained forbidden terms.
-  /// </summary>
-  Blocklist,
-  /// <summary>
-  /// Token generation was stopped because the response contained potentially prohibited content.
-  /// </summary>
-  ProhibitedContent,
-  /// <summary>
-  /// Token generation was stopped because of Sensitive Personally Identifiable Information (SPII).
-  /// </summary>
-  SPII,
-  /// <summary>
-  /// Token generation was stopped because the function call generated by the model was invalid.
-  /// </summary>
-  MalformedFunctionCall,
-}
+  public readonly struct Candidate
+  {
+    private readonly IReadOnlyList<SafetyRating> _safetyRatings;
 
-/// <summary>
-/// A struct representing a possible reply to a content generation prompt.
-/// Each content generation prompt may produce multiple candidate responses.
-/// </summary>
-public readonly struct Candidate {
-  private readonly IReadOnlyList<SafetyRating> _safetyRatings;
+    /// <summary>
+    /// The response’s content.
+    /// </summary>
+    public ModelContent Content { get; }
 
-  /// <summary>
-  /// The response’s content.
-  /// </summary>
-  public ModelContent Content { get; }
-
-  /// <summary>
-  /// The safety rating of the response content.
-  /// </summary>
-  public IReadOnlyList<SafetyRating> SafetyRatings {
-    get {
-      return _safetyRatings ?? new List<SafetyRating>();
+    /// <summary>
+    /// The safety rating of the response content.
+    /// </summary>
+    public IReadOnlyList<SafetyRating> SafetyRatings
+    {
+      get
+      {
+        return _safetyRatings ?? new List<SafetyRating>();
+      }
     }
-  }
 
-  /// <summary>
-  /// The reason the model stopped generating content, if it exists;
-  /// for example, if the model generated a predefined stop sequence.
-  /// </summary>
-  public FinishReason? FinishReason { get; }
+    /// <summary>
+    /// The reason the model stopped generating content, if it exists;
+    /// for example, if the model generated a predefined stop sequence.
+    /// </summary>
+    public FinishReason? FinishReason { get; }
 
-  /// <summary>
-  /// Cited works in the model’s response content, if it exists.
-  /// </summary>
-  public CitationMetadata? CitationMetadata { get; }
+    /// <summary>
+    /// Cited works in the model’s response content, if it exists.
+    /// </summary>
+    public CitationMetadata? CitationMetadata { get; }
 
-  /// <summary>
-  /// Grounding metadata for the response, if any.
-  /// </summary>
-  public GroundingMetadata? GroundingMetadata { get; }
-
-  /// <summary>
-  /// Metadata related to the `URLContext` tool.
-  /// </summary>
-  public UrlContextMetadata? UrlContextMetadata { get; }
+    /// <summary>
+    /// Grounding metadata for the response, if any.
+    /// </summary>
+    public GroundingMetadata? GroundingMetadata { get; }
 
-  // Hidden constructor, users don't need to make this.
-  private Candidate(ModelContent content, List<SafetyRating> safetyRatings,
-      FinishReason? finishReason, CitationMetadata? citationMetadata,
-      GroundingMetadata? groundingMetadata, UrlContextMetadata? urlContextMetadata) {
-    Content = content;
-    _safetyRatings = safetyRatings ?? new List<SafetyRating>();
-    FinishReason = finishReason;
-    CitationMetadata = citationMetadata;
-    GroundingMetadata = groundingMetadata;
-    UrlContextMetadata = urlContextMetadata;
-  }
+    /// <summary>
+    /// Metadata related to the `URLContext` tool.
+    /// </summary>
+    public UrlContextMetadata? UrlContextMetadata { get; }
 
-  private static FinishReason ParseFinishReason(string str) {
-    return str switch {
-      "STOP" => Firebase.AI.FinishReason.Stop,
-      "MAX_TOKENS" => Firebase.AI.FinishReason.MaxTokens,
-      "SAFETY" => Firebase.AI.FinishReason.Safety,
-      "RECITATION" => Firebase.AI.FinishReason.Recitation,
-      "OTHER" => Firebase.AI.FinishReason.Other,
-      "BLOCKLIST" => Firebase.AI.FinishReason.Blocklist,
-      "PROHIBITED_CONTENT" => Firebase.AI.FinishReason.ProhibitedContent,
-      "SPII" => Firebase.AI.FinishReason.SPII,
-      "MALFORMED_FUNCTION_CALL" => Firebase.AI.FinishReason.MalformedFunctionCall,
-      _ => Firebase.AI.FinishReason.Unknown,
-    };
-  }
+    // Hidden constructor, users don't need to make this.
+    private Candidate(ModelContent content, List<SafetyRating> safetyRatings,
+        FinishReason? finishReason, CitationMetadata? citationMetadata,
+        GroundingMetadata? groundingMetadata, UrlContextMetadata? urlContextMetadata)
+    {
+      Content = content;
+      _safetyRatings = safetyRatings ?? new List<SafetyRating>();
+      FinishReason = finishReason;
+      CitationMetadata = citationMetadata;
+      GroundingMetadata = groundingMetadata;
+      UrlContextMetadata = urlContextMetadata;
+    }
 
-  /// <summary>
-  /// Intended for internal use only.
-  /// This method is used for deserializing JSON responses and should not be called directly.
-  /// </summary>
-  internal static Candidate FromJson(Dictionary<string, object> jsonDict,
-      FirebaseAI.Backend.InternalProvider backend) {
-    return new Candidate(
-      jsonDict.ParseObject("content", ModelContent.FromJson, defaultValue: new ModelContent("model")),
-      jsonDict.ParseObjectList("safetyRatings", SafetyRating.FromJson),
-      jsonDict.ParseNullableEnum("finishReason", ParseFinishReason),
-      jsonDict.ParseNullableObject("citationMetadata",
-        (d) => Firebase.AI.CitationMetadata.FromJson(d, backend)),
-      jsonDict.ParseNullableObject("groundingMetadata",
-        Firebase.AI.GroundingMetadata.FromJson),
-      jsonDict.ParseNullableObject("urlContextMetadata",
-        Firebase.AI.UrlContextMetadata.FromJson));
+    private static FinishReason ParseFinishReason(string str)
+    {
+      return str switch
+      {
+        "STOP" => Firebase.AI.FinishReason.Stop,
+        "MAX_TOKENS" => Firebase.AI.FinishReason.MaxTokens,
+        "SAFETY" => Firebase.AI.FinishReason.Safety,
+        "RECITATION" => Firebase.AI.FinishReason.Recitation,
+        "OTHER" => Firebase.AI.FinishReason.Other,
+        "BLOCKLIST" => Firebase.AI.FinishReason.Blocklist,
+        "PROHIBITED_CONTENT" => Firebase.AI.FinishReason.ProhibitedContent,
+        "SPII" => Firebase.AI.FinishReason.SPII,
+        "MALFORMED_FUNCTION_CALL" => Firebase.AI.FinishReason.MalformedFunctionCall,
+        _ => Firebase.AI.FinishReason.Unknown,
+      };
+    }
+
+    /// <summary>
+    /// Intended for internal use only.
+    /// This method is used for deserializing JSON responses and should not be called directly.
+    /// </summary>
+    internal static Candidate FromJson(Dictionary<string, object> jsonDict,
+        FirebaseAI.Backend.InternalProvider backend)
+    {
+      return new Candidate(
+        jsonDict.ParseObject("content", ModelContent.FromJson, defaultValue: new ModelContent("model")),
+        jsonDict.ParseObjectList("safetyRatings", SafetyRating.FromJson),
+        jsonDict.ParseNullableEnum("finishReason", ParseFinishReason),
+        jsonDict.ParseNullableObject("citationMetadata",
+            (d) => Firebase.AI.CitationMetadata.FromJson(d, backend)),
+        jsonDict.ParseNullableObject("groundingMetadata",
+            Firebase.AI.GroundingMetadata.FromJson),
+        jsonDict.ParseNullableObject("urlContextMetadata",
+            Firebase.AI.UrlContextMetadata.FromJson));
+    }
   }
-}
 
 }
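For context, a minimal sketch of how caller code might react to the `FinishReason` values defined above. Only `Candidate`, `FinishReason`, `Content`, and `SafetyRatings` come from this diff; the `FirebaseAI.DefaultInstance.GetGenerativeModel(...)` and `GenerateContentAsync(...)` entry points, the model name, and the `Candidates` property on the response are assumptions drawn from the Firebase AI Unity samples, not confirmed by this change.

using System;
using System.Threading.Tasks;
using Firebase.AI;

public static class CandidateInspector
{
  // Sketch only: the model name and the GetGenerativeModel/GenerateContentAsync/
  // Candidates surface are assumed from Firebase AI Unity samples, not this diff.
  public static async Task InspectAsync()
  {
    var model = FirebaseAI.DefaultInstance.GetGenerativeModel("gemini-2.0-flash");
    var response = await model.GenerateContentAsync("Summarize the rules of chess.");

    foreach (Candidate candidate in response.Candidates)
    {
      // FinishReason is nullable: intermediate stream chunks may not carry one.
      switch (candidate.FinishReason)
      {
        case FinishReason.Stop:
          Console.WriteLine("Finished normally; content is complete.");
          break;
        case FinishReason.MaxTokens:
          Console.WriteLine("Reply was truncated by the output token limit.");
          break;
        case FinishReason.Safety:
          // SafetyRatings never returns null (its getter falls back to an
          // empty list), so it is always safe to inspect.
          Console.WriteLine($"Blocked for safety ({candidate.SafetyRatings.Count} ratings).");
          break;
        case null:
          Console.WriteLine("No finish reason reported yet.");
          break;
        default:
          // Covers Recitation, Blocklist, ProhibitedContent, SPII,
          // MalformedFunctionCall, Other, and Unknown (the fallback for
          // server values newer than this SDK build).
          Console.WriteLine($"Stopped early: {candidate.FinishReason}");
          break;
      }
    }
  }
}

Note the design choice visible in `ParseFinishReason`: any unrecognized wire string maps to `FinishReason.Unknown` instead of throwing, so older SDK builds keep working when the backend introduces a new finish reason; callers only need a default branch like the one above.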