diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ae1c6b5..e518437 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -128,6 +128,7 @@ jobs: # /usr/local/include) so the checked-in code is pinned to a # specific proto source, not whatever protoc version ships. protoc --descriptor_set_out=/tmp/wkt.pb --include_imports \ + --include_source_info \ -I buffa-types/protos \ google/protobuf/any.proto \ google/protobuf/duration.proto \ diff --git a/Taskfile.yml b/Taskfile.yml index 084eec2..114c8a0 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -433,7 +433,7 @@ tasks: Requires protoc on PATH. cmds: - >- - protoc --descriptor_set_out=/tmp/wkt.pb --include_imports + protoc --descriptor_set_out=/tmp/wkt.pb --include_imports --include_source_info -I buffa-types/protos google/protobuf/any.proto google/protobuf/duration.proto diff --git a/buffa-build/src/lib.rs b/buffa-build/src/lib.rs index 9a34fb9..baa9fb3 100644 --- a/buffa-build/src/lib.rs +++ b/buffa-build/src/lib.rs @@ -519,6 +519,7 @@ fn invoke_protoc( let mut cmd = Command::new(&protoc); cmd.arg("--include_imports"); + cmd.arg("--include_source_info"); cmd.arg(format!( "--descriptor_set_out={}", descriptor_path.display() @@ -555,6 +556,9 @@ fn invoke_protoc( /// module-root-relative; passing user paths to both would be a contradiction. /// Codegen filtering happens on our side via `files_to_generate` matching. fn invoke_buf() -> Result, Box> { + // buf build includes SourceCodeInfo by default (there's an + // --exclude-source-info flag to disable it), so proto comments + // propagate to generated code without an explicit opt-in here. let output = Command::new("buf") .arg("build") .arg("--as-file-descriptor-set") diff --git a/buffa-codegen/src/comments.rs b/buffa-codegen/src/comments.rs new file mode 100644 index 0000000..46a85c8 --- /dev/null +++ b/buffa-codegen/src/comments.rs @@ -0,0 +1,747 @@ +//! Source code comment extraction from protobuf descriptors. +//! +//! 
Protobuf stores source comments in `SourceCodeInfo`, attached to each +//! `FileDescriptorProto`. Comments are indexed by a *path* — a sequence of +//! field numbers and repeated-field indices that navigates from the +//! `FileDescriptorProto` root to a specific descriptor element. +//! +//! Rather than exposing these raw index-based paths to the rest of codegen, +//! this module translates them into an FQN-keyed map at construction time. +//! This trades a small up-front descriptor walk for significantly simpler +//! call sites: codegen functions look up comments by proto FQN (which they +//! already have) instead of threading index-based paths through every level +//! of the call stack. + +use std::collections::HashMap; + +use proc_macro2::TokenStream; +use quote::quote; + +use crate::generated::descriptor::{DescriptorProto, FileDescriptorProto}; + +// ── Descriptor field numbers (from google/protobuf/descriptor.proto) ──────── +// FileDescriptorProto +const FILE_MESSAGE_TYPE: i32 = 4; +const FILE_ENUM_TYPE: i32 = 5; + +// DescriptorProto +const MSG_FIELD: i32 = 2; +const MSG_NESTED_TYPE: i32 = 3; +const MSG_ENUM_TYPE: i32 = 4; +const MSG_ONEOF_DECL: i32 = 8; + +// EnumDescriptorProto +const ENUM_VALUE: i32 = 2; + +/// Walk a file descriptor's `SourceCodeInfo` and produce an FQN-keyed comment map. +/// +/// Returns `(fqn -> comment_string)` entries for messages, fields, enums, +/// enum values, and oneofs. FQNs use the same dotted form as `proto_fqn` +/// throughout codegen (no leading dot), e.g. `"example.v1.Person"`, +/// `"example.v1.Person.name"`. 
+pub fn fqn_comments(file: &FileDescriptorProto) -> HashMap { + let path_map = build_path_map(file); + if path_map.is_empty() { + return HashMap::new(); + } + + let package = file.package.as_deref().unwrap_or(""); + let mut result = HashMap::new(); + + // Top-level enums + for (i, enum_type) in file.enum_type.iter().enumerate() { + let enum_name = enum_type.name.as_deref().unwrap_or(""); + let fqn = fqn_join(package, enum_name); + let path = vec![FILE_ENUM_TYPE, i as i32]; + collect_enum_comments(&path_map, &path, &fqn, enum_type, &mut result); + } + + // Top-level messages + for (i, msg) in file.message_type.iter().enumerate() { + let msg_name = msg.name.as_deref().unwrap_or(""); + let fqn = fqn_join(package, msg_name); + let path = vec![FILE_MESSAGE_TYPE, i as i32]; + collect_message_comments(&path_map, &path, &fqn, msg, &mut result); + } + + result +} + +/// Build the raw path-based comment map from `SourceCodeInfo`. +fn build_path_map(file: &FileDescriptorProto) -> HashMap, String> { + let mut map = HashMap::new(); + let source_code_info = match file.source_code_info.as_option() { + Some(sci) => sci, + None => return map, + }; + for location in &source_code_info.location { + if let Some(comment) = format_comment(location) { + map.insert(location.path.clone(), comment); + } + } + map +} + +/// Recursively collect comments for a message and all its children. 
+fn collect_message_comments( + path_map: &HashMap, String>, + msg_path: &[i32], + msg_fqn: &str, + msg: &DescriptorProto, + out: &mut HashMap, +) { + // Message itself + if let Some(comment) = path_map.get(msg_path) { + out.insert(msg_fqn.to_string(), comment.clone()); + } + + // Fields + for (i, field) in msg.field.iter().enumerate() { + let field_name = field.name.as_deref().unwrap_or(""); + let fqn = format!("{}.{}", msg_fqn, field_name); + let mut path = msg_path.to_vec(); + path.extend_from_slice(&[MSG_FIELD, i as i32]); + if let Some(comment) = path_map.get(&path) { + out.insert(fqn, comment.clone()); + } + } + + // Oneofs + for (i, oneof) in msg.oneof_decl.iter().enumerate() { + let oneof_name = oneof.name.as_deref().unwrap_or(""); + let fqn = format!("{}.{}", msg_fqn, oneof_name); + let mut path = msg_path.to_vec(); + path.extend_from_slice(&[MSG_ONEOF_DECL, i as i32]); + if let Some(comment) = path_map.get(&path) { + out.insert(fqn, comment.clone()); + } + } + + // Nested enums + for (i, enum_type) in msg.enum_type.iter().enumerate() { + let enum_name = enum_type.name.as_deref().unwrap_or(""); + let fqn = format!("{}.{}", msg_fqn, enum_name); + let mut path = msg_path.to_vec(); + path.extend_from_slice(&[MSG_ENUM_TYPE, i as i32]); + collect_enum_comments(path_map, &path, &fqn, enum_type, out); + } + + // Nested messages (recurse) + for (i, nested) in msg.nested_type.iter().enumerate() { + let nested_name = nested.name.as_deref().unwrap_or(""); + let fqn = format!("{}.{}", msg_fqn, nested_name); + let mut path = msg_path.to_vec(); + path.extend_from_slice(&[MSG_NESTED_TYPE, i as i32]); + collect_message_comments(path_map, &path, &fqn, nested, out); + } +} + +/// Collect comments for an enum and its values. 
+fn collect_enum_comments( + path_map: &HashMap, String>, + enum_path: &[i32], + enum_fqn: &str, + enum_desc: &crate::generated::descriptor::EnumDescriptorProto, + out: &mut HashMap, +) { + // Enum itself + if let Some(comment) = path_map.get(enum_path) { + out.insert(enum_fqn.to_string(), comment.clone()); + } + + // Enum values + for (i, value) in enum_desc.value.iter().enumerate() { + let value_name = value.name.as_deref().unwrap_or(""); + let fqn = format!("{}.{}", enum_fqn, value_name); + let mut path = enum_path.to_vec(); + path.extend_from_slice(&[ENUM_VALUE, i as i32]); + if let Some(comment) = path_map.get(&path) { + out.insert(fqn, comment.clone()); + } + } +} + +/// Join a package and a name into an FQN (no leading dot). +fn fqn_join(package: &str, name: &str) -> String { + if package.is_empty() { + name.to_string() + } else { + format!("{}.{}", package, name) + } +} + +/// Convert a comment string into `#[doc = "..."]` token stream attributes. +/// +/// Each line of the comment becomes a separate `#[doc = "..."]` attribute +/// so that rustdoc renders them as a contiguous doc block. Returns empty +/// tokens if the comment is `None`. +pub fn doc_attrs(comment: Option<&str>) -> TokenStream { + match comment { + None => quote! {}, + Some(text) => doc_lines_to_tokens(text), + } +} + +/// Combine an optional proto comment with a tag line into `#[doc = "..."]` attrs. +/// +/// If a proto comment is present, a blank `#[doc = ""]` separator is inserted +/// between it and the tag line so that rustdoc renders them as separate +/// paragraphs. +pub fn doc_attrs_with_tag(comment: Option<&str>, tag: &str) -> TokenStream { + match comment { + None => doc_lines_to_tokens(tag), + Some(text) => { + let combined = format!("{text}\n\n{tag}"); + doc_lines_to_tokens(&combined) + } + } +} + +/// Convert text into `#[doc = " ..."]` tokens, ensuring each non-empty line +/// has a leading space so that `prettyplease` renders `/// text` instead of +/// `///text`. 
+/// +/// Indented code blocks (4+ spaces) from proto source comments contain +/// C++/Java/Python examples, not Rust. We wrap them in ```` ```text ```` +/// fences so rustdoc renders them as plain text instead of trying to +/// compile them as Rust doc tests. +fn doc_lines_to_tokens(text: &str) -> TokenStream { + let raw_lines: Vec<&str> = text.lines().collect(); + let mut lines: Vec = Vec::with_capacity(raw_lines.len()); + let mut in_code_block = false; + let mut in_user_fence = false; + + for (idx, line) in raw_lines.iter().enumerate() { + // Proto authors may write markdown fences directly. Pass them + // through and suppress the indented-block heuristic inside so we + // don't nest a synthetic ```text fence. + if line.trim_start().starts_with("```") && !in_code_block { + in_user_fence = !in_user_fence; + lines.push(if line.starts_with(' ') { + line.to_string() + } else { + format!(" {line}") + }); + continue; + } + if in_user_fence { + lines.push(if line.is_empty() { + String::new() + } else if line.starts_with(' ') { + line.to_string() + } else { + format!(" {line}") + }); + continue; + } + + let is_indented = line.starts_with(" ") || line.starts_with('\t'); + + if is_indented && !in_code_block { + // Open a text fence before the first indented line. + lines.push(" ```text".to_string()); + in_code_block = true; + } else if in_code_block && !is_indented { + // Non-indented line (including empty) closes the code block, + // but only if there isn't another indented line coming next. + if line.is_empty() { + // Look ahead: if the next non-empty line is indented, keep + // the block open (it's a blank line within the example). + let next_is_indented = raw_lines[idx + 1..] 
+ .iter() + .find(|l| !l.is_empty()) + .is_some_and(|l| l.starts_with(" ") || l.starts_with('\t')); + if next_is_indented { + lines.push(String::new()); + continue; + } + } + lines.push(" ```".to_string()); + in_code_block = false; + } + + if in_code_block { + // Strip the 4-space / tab indent since we're inside a fence. + let stripped = line + .strip_prefix(" ") + .or_else(|| line.strip_prefix('\t')) + .unwrap_or(line); + if stripped.is_empty() { + lines.push(String::new()); + } else if stripped.starts_with(' ') { + lines.push(stripped.to_string()); + } else { + lines.push(format!(" {stripped}")); + } + } else if line.is_empty() { + lines.push(String::new()); + } else if line.starts_with(' ') { + lines.push(line.to_string()); + } else { + lines.push(format!(" {line}")); + } + } + + if in_code_block { + lines.push(" ```".to_string()); + } + + quote! { + #( #[doc = #lines] )* + } +} + +/// Format a `SourceCodeInfo.Location` into a doc-comment string. +/// +/// Combines leading detached comments, leading comments, and trailing +/// comments. Returns `None` if no comments are present. +/// +/// Proto comments use `//` or `/* */` syntax. protoc strips the leading +/// `// ` or ` * ` prefix and stores plain text. Each line is separated by +/// `\n`. We preserve this structure so that `#[doc = "..."]` renders +/// correctly in rustdoc. +/// +/// Leading newlines and trailing whitespace are stripped, but leading +/// spaces on the first content line are preserved so that indented code +/// blocks survive for the fencing heuristic in [`doc_lines_to_tokens`]. +/// +/// When multiple parts (detached, leading, trailing) are present they are +/// joined with a blank line. If an indented code block spans across parts, +/// it will be fenced as two separate `text` blocks — this is a known +/// limitation and acceptable since each proto comment section is +/// conceptually distinct. 
+fn format_comment( + location: &crate::generated::descriptor::source_code_info::Location, +) -> Option { + let mut parts: Vec<&str> = Vec::new(); + + for detached in &location.leading_detached_comments { + let trimmed = detached.trim_start_matches('\n').trim_end(); + if !trimmed.is_empty() { + parts.push(trimmed); + } + } + + if let Some(ref leading) = location.leading_comments { + let trimmed = leading.trim_start_matches('\n').trim_end(); + if !trimmed.is_empty() { + parts.push(trimmed); + } + } + + if let Some(ref trailing) = location.trailing_comments { + let trimmed = trailing.trim_start_matches('\n').trim_end(); + if !trimmed.is_empty() { + parts.push(trimmed); + } + } + + if parts.is_empty() { + return None; + } + + Some(parts.join("\n\n")) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::generated::descriptor::source_code_info::Location; + use crate::generated::descriptor::{ + EnumDescriptorProto, EnumValueDescriptorProto, FieldDescriptorProto, OneofDescriptorProto, + SourceCodeInfo, + }; + + fn make_location(path: Vec, leading: Option<&str>, trailing: Option<&str>) -> Location { + Location { + path, + leading_comments: leading.map(|s| s.to_string()), + trailing_comments: trailing.map(|s| s.to_string()), + ..Default::default() + } + } + + fn make_file_with_locations( + package: &str, + messages: Vec, + enums: Vec, + locations: Vec, + ) -> FileDescriptorProto { + let mut file = FileDescriptorProto::default(); + file.package = Some(package.to_string()); + file.message_type = messages; + file.enum_type = enums; + let mut sci = SourceCodeInfo::default(); + sci.location = locations; + file.source_code_info = sci.into(); + file + } + + fn make_field(name: &str) -> FieldDescriptorProto { + FieldDescriptorProto { + name: Some(name.to_string()), + ..Default::default() + } + } + + fn make_enum(name: &str, values: &[&str]) -> EnumDescriptorProto { + EnumDescriptorProto { + name: Some(name.to_string()), + value: values + .iter() + .enumerate() + .map(|(i, 
v)| EnumValueDescriptorProto { + name: Some(v.to_string()), + number: Some(i as i32), + ..Default::default() + }) + .collect(), + ..Default::default() + } + } + + #[test] + fn test_empty_source_code_info() { + let file = FileDescriptorProto::default(); + let map = fqn_comments(&file); + assert!(map.is_empty()); + } + + #[test] + fn test_message_comment() { + let file = make_file_with_locations( + "pkg", + vec![DescriptorProto { + name: Some("Person".to_string()), + ..Default::default() + }], + vec![], + vec![make_location(vec![4, 0], Some("A test message.\n"), None)], + ); + let map = fqn_comments(&file); + assert_eq!( + map.get("pkg.Person").map(|s| s.as_str()), + Some("A test message.") + ); + } + + #[test] + fn test_field_comment() { + let file = make_file_with_locations( + "pkg", + vec![DescriptorProto { + name: Some("User".to_string()), + field: vec![make_field("email")], + ..Default::default() + }], + vec![], + vec![make_location( + vec![4, 0, 2, 0], + Some("The user's email.\n"), + None, + )], + ); + let map = fqn_comments(&file); + assert_eq!( + map.get("pkg.User.email").map(|s| s.as_str()), + Some("The user's email.") + ); + } + + #[test] + fn test_enum_and_value_comments() { + let file = make_file_with_locations( + "pkg", + vec![], + vec![make_enum("Status", &["UNKNOWN", "ACTIVE"])], + vec![ + make_location(vec![5, 0], Some("Status enum.\n"), None), + make_location(vec![5, 0, 2, 0], None, Some("Unknown status.\n")), + make_location(vec![5, 0, 2, 1], Some("Active status.\n"), None), + ], + ); + let map = fqn_comments(&file); + assert_eq!( + map.get("pkg.Status").map(|s| s.as_str()), + Some("Status enum.") + ); + assert_eq!( + map.get("pkg.Status.UNKNOWN").map(|s| s.as_str()), + Some("Unknown status.") + ); + assert_eq!( + map.get("pkg.Status.ACTIVE").map(|s| s.as_str()), + Some("Active status.") + ); + } + + #[test] + fn test_oneof_comment() { + let file = make_file_with_locations( + "pkg", + vec![DescriptorProto { + name: Some("Event".to_string()), + 
oneof_decl: vec![OneofDescriptorProto { + name: Some("payload".to_string()), + ..Default::default() + }], + ..Default::default() + }], + vec![], + vec![make_location( + vec![4, 0, 8, 0], + Some("The payload.\n"), + None, + )], + ); + let map = fqn_comments(&file); + assert_eq!( + map.get("pkg.Event.payload").map(|s| s.as_str()), + Some("The payload.") + ); + } + + #[test] + fn test_nested_message_comment() { + let file = make_file_with_locations( + "pkg", + vec![DescriptorProto { + name: Some("Outer".to_string()), + nested_type: vec![DescriptorProto { + name: Some("Inner".to_string()), + field: vec![make_field("value")], + ..Default::default() + }], + ..Default::default() + }], + vec![], + vec![ + make_location(vec![4, 0, 3, 0], Some("A nested type.\n"), None), + make_location(vec![4, 0, 3, 0, 2, 0], Some("The value.\n"), None), + ], + ); + let map = fqn_comments(&file); + assert_eq!( + map.get("pkg.Outer.Inner").map(|s| s.as_str()), + Some("A nested type.") + ); + assert_eq!( + map.get("pkg.Outer.Inner.value").map(|s| s.as_str()), + Some("The value.") + ); + } + + #[test] + fn test_nested_enum_in_message_comment() { + // Path [4, 0, 4, 0] = message_type[0].enum_type[0] (MSG_ENUM_TYPE = 4). 
+ let file = make_file_with_locations( + "pkg", + vec![DescriptorProto { + name: Some("Container".to_string()), + enum_type: vec![make_enum("Kind", &["UNSET", "A"])], + ..Default::default() + }], + vec![], + vec![ + make_location(vec![4, 0, 4, 0], Some("Kind of thing.\n"), None), + make_location(vec![4, 0, 4, 0, 2, 1], Some("The A kind.\n"), None), + ], + ); + let map = fqn_comments(&file); + assert_eq!( + map.get("pkg.Container.Kind").map(|s| s.as_str()), + Some("Kind of thing.") + ); + assert_eq!( + map.get("pkg.Container.Kind.A").map(|s| s.as_str()), + Some("The A kind.") + ); + } + + #[test] + fn test_leading_and_trailing_combined() { + let file = make_file_with_locations( + "pkg", + vec![DescriptorProto { + name: Some("Msg".to_string()), + ..Default::default() + }], + vec![], + vec![make_location( + vec![4, 0], + Some("Leading.\n"), + Some("Trailing.\n"), + )], + ); + let map = fqn_comments(&file); + assert_eq!( + map.get("pkg.Msg").map(|s| s.as_str()), + Some("Leading.\n\nTrailing.") + ); + } + + #[test] + fn test_detached_comments() { + let file = make_file_with_locations( + "pkg", + vec![DescriptorProto { + name: Some("Msg".to_string()), + ..Default::default() + }], + vec![], + vec![{ + let mut loc = make_location(vec![4, 0], Some("Main.\n"), None); + loc.leading_detached_comments = vec!["Detached.\n".to_string()]; + loc + }], + ); + let map = fqn_comments(&file); + assert_eq!( + map.get("pkg.Msg").map(|s| s.as_str()), + Some("Detached.\n\nMain.") + ); + } + + #[test] + fn test_whitespace_only_comments_ignored() { + let file = make_file_with_locations( + "pkg", + vec![DescriptorProto { + name: Some("Msg".to_string()), + ..Default::default() + }], + vec![], + vec![make_location(vec![4, 0], Some(" \n "), Some(" "))], + ); + let map = fqn_comments(&file); + assert!(map.get("pkg.Msg").is_none()); + } + + #[test] + fn test_empty_package() { + let file = make_file_with_locations( + "", + vec![DescriptorProto { + name: Some("Root".to_string()), + field: 
vec![make_field("id")], + ..Default::default() + }], + vec![], + vec![ + make_location(vec![4, 0], Some("Root msg.\n"), None), + make_location(vec![4, 0, 2, 0], Some("The id.\n"), None), + ], + ); + let map = fqn_comments(&file); + assert_eq!(map.get("Root").map(|s| s.as_str()), Some("Root msg.")); + assert_eq!(map.get("Root.id").map(|s| s.as_str()), Some("The id.")); + } + + // --- doc_lines_to_tokens ----------------------------------------------- + + fn doc_tokens(text: &str) -> String { + doc_lines_to_tokens(text).to_string() + } + + #[test] + fn test_doc_plain_text_gets_leading_space() { + let out = doc_tokens("hello world"); + assert_eq!(out, "# [doc = \" hello world\"]"); + } + + #[test] + fn test_doc_line_already_spaced_kept_as_is() { + let out = doc_tokens(" already spaced"); + assert_eq!(out, "# [doc = \" already spaced\"]"); + } + + #[test] + fn test_doc_empty_line_preserved() { + let out = doc_tokens("a\n\nb"); + assert_eq!(out, "# [doc = \" a\"] # [doc = \"\"] # [doc = \" b\"]"); + } + + #[test] + fn test_doc_indented_block_gets_text_fence() { + let out = doc_tokens("Example:\n x = 1;\n y = 2;"); + assert!(out.contains("```text"), "should open text fence: {out}"); + assert!(out.contains("\" x = 1;\""), "indent stripped: {out}"); + assert!(out.ends_with("# [doc = \" ```\"]"), "should close: {out}"); + } + + #[test] + fn test_doc_blank_line_within_indented_block_keeps_fence_open() { + let out = doc_tokens(" line1\n\n line2"); + let fence_count = out.matches("```").count(); + assert_eq!( + fence_count, 2, + "one open + one close, not two blocks: {out}" + ); + } + + #[test] + fn test_doc_trailing_unclosed_block_gets_closing_fence() { + let out = doc_tokens("text\n code"); + assert!(out.ends_with("# [doc = \" ```\"]"), "trailing close: {out}"); + } + + #[test] + fn test_doc_tab_indent_detected() { + let out = doc_tokens("\tcode line"); + assert!(out.contains("```text"), "tab triggers fence: {out}"); + } + + #[test] + fn test_doc_empty_input() { + 
assert_eq!(doc_tokens(""), ""); + } + + #[test] + fn test_doc_user_markdown_fence_passes_through() { + // Proto authors may write markdown fences directly. These should + // pass through to rustdoc unmodified — no extra `text` fence added. + let out = doc_tokens("Example:\n```go\nx := 1\n```"); + assert_eq!( + out.matches("```").count(), + 2, + "user fence preserved, not double-fenced: {out}" + ); + assert!(!out.contains("```text"), "no synthetic fence: {out}"); + } + + #[test] + fn test_doc_user_fence_with_indented_content_not_double_fenced() { + // Edge case: user-written fence with 4-space-indented content inside. + // The indented-block heuristic must not fire inside an existing fence. + let out = doc_tokens("```\n int x = 1;\n```"); + assert_eq!( + out.matches("```").count(), + 2, + "no nested fence inside user fence: {out}" + ); + } + + // --- format_comment indentation preservation ---------------------------- + + #[test] + fn test_format_comment_preserves_leading_indent() { + let loc = Location { + leading_comments: Some(" int x = 1;\n int y = 2;\n".to_string()), + ..Default::default() + }; + let out = format_comment(&loc).unwrap(); + assert!( + out.starts_with(" "), + "leading indent must survive for fencing: {out:?}" + ); + } + + #[test] + fn test_format_comment_strips_leading_newlines_keeps_spaces() { + let loc = Location { + leading_comments: Some("\n\n hello\n".to_string()), + ..Default::default() + }; + assert_eq!(format_comment(&loc).as_deref(), Some(" hello")); + } +} diff --git a/buffa-codegen/src/context.rs b/buffa-codegen/src/context.rs index 013f616..ad1d1cc 100644 --- a/buffa-codegen/src/context.rs +++ b/buffa-codegen/src/context.rs @@ -36,6 +36,19 @@ pub struct CodeGenContext<'a> { /// enum-level `enum_type` into field options (verified 2026-03), so /// callers must look this up via `is_enum_closed`. enum_closedness: HashMap, + /// Map from fully-qualified protobuf element name to its source comment. 
+ /// + /// Keys use dotted FQN form without a leading dot, matching the `proto_fqn` + /// values already threaded through codegen: `"pkg.Message"`, + /// `"pkg.Message.field_name"`, `"pkg.Enum.VALUE_NAME"`, + /// `"pkg.Message.oneof_name"`. + /// + /// Built by walking each file's descriptor tree alongside its + /// `SourceCodeInfo` (which uses index-based paths). This up-front + /// translation means codegen call sites can look up comments by the + /// proto FQN they already have, rather than threading index-based paths + /// through every function signature. + comment_map: HashMap, } impl<'a> CodeGenContext<'a> { @@ -52,8 +65,10 @@ impl<'a> CodeGenContext<'a> { let mut type_map = HashMap::new(); let mut package_of = HashMap::new(); let mut enum_closedness = HashMap::new(); + let mut comment_map = HashMap::new(); for file in files { + comment_map.extend(crate::comments::fqn_comments(file)); let package = file.package.as_deref().unwrap_or(""); let file_features = features::for_file(file); let proto_prefix = if package.is_empty() { @@ -129,6 +144,7 @@ impl<'a> CodeGenContext<'a> { type_map, package_of, enum_closedness, + comment_map, } } @@ -157,6 +173,18 @@ impl<'a> CodeGenContext<'a> { self.type_map.get(proto_fqn).map(|s| s.as_str()) } + /// Look up the source comment for a protobuf element by FQN. + /// + /// `fqn` uses the same dotted form as `proto_fqn` throughout codegen + /// (no leading dot). For sub-elements, append the element name: + /// - Message: `"pkg.Message"` + /// - Field: `"pkg.Message.field_name"` + /// - Enum value: `"pkg.Enum.VALUE_NAME"` + /// - Oneof: `"pkg.Message.oneof_name"` + pub fn comment(&self, fqn: &str) -> Option<&str> { + self.comment_map.get(fqn).map(|s| s.as_str()) + } + /// Look up whether an enum (by fully-qualified proto name) is closed. 
/// /// Returns `None` if the enum is not in this compilation set (e.g., an diff --git a/buffa-codegen/src/enumeration.rs b/buffa-codegen/src/enumeration.rs index 8fed4c8..7d2533b 100644 --- a/buffa-codegen/src/enumeration.rs +++ b/buffa-codegen/src/enumeration.rs @@ -99,6 +99,7 @@ pub fn generate_enum( ctx: &CodeGenContext, enum_desc: &EnumDescriptorProto, rust_name: &str, + proto_fqn: &str, features: &ResolvedFeatures, _resolver: &crate::imports::ImportResolver, ) -> Result { @@ -129,10 +130,13 @@ pub fn generate_enum( .number .ok_or(CodeGenError::MissingField("enum_value.number"))?; let variant_ident = crate::message::make_field_ident(value_name); + let value_fqn = format!("{}.{}", proto_fqn, value_name); + let variant_doc = crate::comments::doc_attrs(ctx.comment(&value_fqn)); if let Some(&primary_name) = seen.get(&number) { let primary_ident = crate::message::make_field_ident(primary_name); alias_consts.push(quote! { + #variant_doc #[allow(non_upper_case_globals)] pub const #variant_ident: Self = Self::#primary_ident; }); @@ -148,7 +152,7 @@ pub fn generate_enum( if number == 0 && zero_variant.is_none() { zero_variant = Some(variant_ident.clone()); } - variants.push(quote! { #variant_ident = #number }); + variants.push(quote! { #variant_doc #variant_ident = #number }); from_i32_arms.push(quote! { #number => ::core::option::Option::Some(Self::#variant_ident) }); @@ -202,7 +206,10 @@ pub fn generate_enum( quote! {} }; + let enum_doc = crate::comments::doc_attrs(ctx.comment(proto_fqn)); + Ok(quote! { + #enum_doc #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #arbitrary_derive #[repr(i32)] diff --git a/buffa-codegen/src/generated/google.protobuf.compiler.plugin.rs b/buffa-codegen/src/generated/google.protobuf.compiler.plugin.rs index 03400e0..c79758a 100644 --- a/buffa-codegen/src/generated/google.protobuf.compiler.plugin.rs +++ b/buffa-codegen/src/generated/google.protobuf.compiler.plugin.rs @@ -1,6 +1,7 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. 
// source: google/protobuf/compiler/plugin.proto +/// The version number of protocol compiler. #[derive(Clone, PartialEq, Default)] pub struct Version { /// Field 1: `major` @@ -9,6 +10,9 @@ pub struct Version { pub minor: Option, /// Field 3: `patch` pub patch: Option, + /// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + /// be empty for mainline stable releases. + /// /// Field 4: `suffix` pub suffix: Option<::buffa::alloc::string::String>, #[doc(hidden)] @@ -181,16 +185,49 @@ impl ::buffa::ExtensionSet for Version { &mut self.__buffa_unknown_fields } } +/// An encoded CodeGeneratorRequest is written to the plugin's stdin. #[derive(Clone, PartialEq, Default)] pub struct CodeGeneratorRequest { + /// The .proto files that were explicitly listed on the command-line. The + /// code generator should generate code only for these files. Each file's + /// descriptor will be included in proto_file, below. + /// /// Field 1: `file_to_generate` pub file_to_generate: ::buffa::alloc::vec::Vec<::buffa::alloc::string::String>, + /// The generator parameter passed on the command-line. + /// /// Field 2: `parameter` pub parameter: Option<::buffa::alloc::string::String>, + /// FileDescriptorProtos for all files in files_to_generate and everything + /// they import. The files will appear in topological order, so each file + /// appears before any file that imports it. + /// + /// Note: the files listed in files_to_generate will include runtime-retention + /// options only, but all other files will include source-retention options. + /// The source_file_descriptors field below is available in case you need + /// source-retention options for files_to_generate. + /// + /// protoc guarantees that all proto_files will be written after + /// the fields above, even though this is not technically guaranteed by the + /// protobuf wire format. 
This theoretically could allow a plugin to stream + /// in the FileDescriptorProtos and handle them one by one rather than read + /// the entire set into memory at once. However, as of this writing, this + /// is not similarly optimized on protoc's end -- it will store all fields in + /// memory at once before sending them to the plugin. + /// + /// Type names of fields and extensions in the FileDescriptorProto are always + /// fully qualified. + /// /// Field 15: `proto_file` pub proto_file: ::buffa::alloc::vec::Vec, + /// File descriptors with all options, including source-retention options. + /// These descriptors are only provided for the files listed in + /// files_to_generate. + /// /// Field 17: `source_file_descriptors` pub source_file_descriptors: ::buffa::alloc::vec::Vec, + /// The version number of protocol compiler. + /// /// Field 3: `compiler_version` pub compiler_version: ::buffa::MessageField, #[doc(hidden)] @@ -417,14 +454,37 @@ impl ::buffa::ExtensionSet for CodeGeneratorRequest { &mut self.__buffa_unknown_fields } } +/// The plugin writes an encoded CodeGeneratorResponse to stdout. #[derive(Clone, PartialEq, Default)] pub struct CodeGeneratorResponse { + /// Error message. If non-empty, code generation failed. The plugin process + /// should exit with status code zero even if it reports an error in this way. + /// + /// This should be used to indicate errors in .proto files which prevent the + /// code generator from generating correct code. Errors which indicate a + /// problem in protoc itself -- such as the input CodeGeneratorRequest being + /// unparseable -- should be reported by writing a message to stderr and + /// exiting with a non-zero status code. + /// /// Field 1: `error` pub error: Option<::buffa::alloc::string::String>, + /// A bitmask of supported features that the code generator supports. + /// This is a bitwise "or" of values from the Feature enum. 
+ /// /// Field 2: `supported_features` pub supported_features: Option, + /// The minimum edition this plugin supports. This will be treated as an + /// Edition enum, but we want to allow unknown values. It should be specified + /// according the edition enum value, *not* the edition number. Only takes + /// effect for plugins that have FEATURE_SUPPORTS_EDITIONS set. + /// /// Field 3: `minimum_edition` pub minimum_edition: Option, + /// The maximum edition this plugin supports. This will be treated as an + /// Edition enum, but we want to allow unknown values. It should be specified + /// according the edition enum value, *not* the edition number. Only takes + /// effect for plugins that have FEATURE_SUPPORTS_EDITIONS set. + /// /// Field 4: `maximum_edition` pub maximum_edition: Option, /// Field 15: `file` @@ -634,6 +694,7 @@ impl ::buffa::ExtensionSet for CodeGeneratorResponse { pub mod code_generator_response { #[allow(unused_imports)] use super::*; + /// Sync with code_generator.h. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum Feature { @@ -678,14 +739,71 @@ pub mod code_generator_response { } } } + /// Represents a single generated file. #[derive(Clone, PartialEq, Default)] pub struct File { + /// The file name, relative to the output directory. The name must not + /// contain "." or ".." components and must be relative, not be absolute (so, + /// the file cannot lie outside the output directory). "/" must be used as + /// the path separator, not "\". + /// + /// If the name is omitted, the content will be appended to the previous + /// file. This allows the generator to break large files into small chunks, + /// and allows the generated text to be streamed back to protoc so that large + /// files need not reside completely in memory at one time. Note that as of + /// this writing protoc does not optimize for this -- it will read the entire + /// CodeGeneratorResponse before writing files to disk. 
+ /// /// Field 1: `name` pub name: Option<::buffa::alloc::string::String>, + /// If non-empty, indicates that the named file should already exist, and the + /// content here is to be inserted into that file at a defined insertion + /// point. This feature allows a code generator to extend the output + /// produced by another code generator. The original generator may provide + /// insertion points by placing special annotations in the file that look + /// like: + /// @@protoc_insertion_point(NAME) + /// The annotation can have arbitrary text before and after it on the line, + /// which allows it to be placed in a comment. NAME should be replaced with + /// an identifier naming the point -- this is what other generators will use + /// as the insertion_point. Code inserted at this point will be placed + /// immediately above the line containing the insertion point (thus multiple + /// insertions to the same point will come out in the order they were added). + /// The double-@ is intended to make it unlikely that the generated code + /// could contain things that look like insertion points by accident. + /// + /// For example, the C++ code generator places the following line in the + /// .pb.h files that it generates: + /// // @@protoc_insertion_point(namespace_scope) + /// This line appears within the scope of the file's package namespace, but + /// outside of any particular class. Another plugin can then specify the + /// insertion_point "namespace_scope" to generate additional classes or + /// other declarations that should be placed in this scope. + /// + /// Note that if the line containing the insertion point begins with + /// whitespace, the same whitespace will be added to every line of the + /// inserted text. This is useful for languages like Python, where + /// indentation matters. In these languages, the insertion point comment + /// should be indented the same amount as any inserted code will need to be + /// in order to work correctly in that context. 
+ /// + /// The code generator that generates the initial file and the one which + /// inserts into it must both run as part of a single invocation of protoc. + /// Code generators are executed in the order in which they appear on the + /// command line. + /// + /// If |insertion_point| is present, |name| must also be present. + /// /// Field 2: `insertion_point` pub insertion_point: Option<::buffa::alloc::string::String>, + /// The file contents. + /// /// Field 15: `content` pub content: Option<::buffa::alloc::string::String>, + /// Information describing the file content being inserted. If an insertion + /// point is used, this information will be appropriately offset and inserted + /// into the code generation metadata for the generated files. + /// /// Field 16: `generated_code_info` pub generated_code_info: ::buffa::MessageField, #[doc(hidden)] diff --git a/buffa-codegen/src/generated/google.protobuf.descriptor.rs b/buffa-codegen/src/generated/google.protobuf.descriptor.rs index 8c7c0b9..3de7c27 100644 --- a/buffa-codegen/src/generated/google.protobuf.descriptor.rs +++ b/buffa-codegen/src/generated/google.protobuf.descriptor.rs @@ -1,21 +1,38 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. // source: google/protobuf/descriptor.proto +/// The full set of known editions. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum Edition { + /// A placeholder for an unknown edition value. EDITION_UNKNOWN = 0i32, + /// A placeholder edition for specifying default behaviors *before* a feature + /// was first introduced. This is effectively an "infinite past". EDITION_LEGACY = 900i32, + /// Legacy syntax "editions". These pre-date editions, but behave much like + /// distinct editions. These can't be used to specify the edition of proto + /// files, but feature definitions must supply proto2/proto3 defaults for + /// backwards compatibility. EDITION_PROTO2 = 998i32, EDITION_PROTO3 = 999i32, + /// Editions that have been released. 
The specific values are arbitrary and + /// should not be depended on, but they will always be time-ordered for easy + /// comparison. EDITION_2023 = 1000i32, EDITION_2024 = 1001i32, + /// A placeholder edition for developing and testing unscheduled features. EDITION_UNSTABLE = 9999i32, + /// Placeholder editions for testing feature resolution. These should not be + /// used or relied on outside of tests. EDITION_1_TEST_ONLY = 1i32, EDITION_2_TEST_ONLY = 2i32, EDITION_99997_TEST_ONLY = 99997i32, EDITION_99998_TEST_ONLY = 99998i32, EDITION_99999_TEST_ONLY = 99999i32, + /// Placeholder for specifying unbounded edition support. This should only + /// ever be used by plugins that can expect to never require any changes to + /// support a new edition. EDITION_MAX = 2147483647i32, } impl ::core::default::Default for Edition { @@ -91,6 +108,11 @@ impl ::buffa::Enumeration for Edition { } } } +/// Describes the 'visibility' of a symbol with respect to the proto import +/// system. Symbols can only be imported when the visibility rules do not prevent +/// it (ex: local symbols cannot be imported). Visibility modifiers can only set +/// on `message` and `enum` as they are the only types available to be referenced +/// from other files. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum SymbolVisibility { @@ -131,6 +153,8 @@ impl ::buffa::Enumeration for SymbolVisibility { } } } +/// The protocol compiler can output a FileDescriptorSet containing the .proto +/// files it parses. #[derive(Clone, PartialEq, Default)] pub struct FileDescriptorSet { /// Field 1: `file` @@ -243,20 +267,37 @@ impl ::buffa::ExtensionSet for FileDescriptorSet { &mut self.__buffa_unknown_fields } } +/// Describes a complete .proto file. #[derive(Clone, PartialEq, Default)] pub struct FileDescriptorProto { + /// file name, relative to root of source tree + /// /// Field 1: `name` pub name: Option<::buffa::alloc::string::String>, + /// e.g. "foo", "foo.bar", etc. 
+ /// /// Field 2: `package` pub package: Option<::buffa::alloc::string::String>, + /// Names of files imported by this file. + /// /// Field 3: `dependency` pub dependency: ::buffa::alloc::vec::Vec<::buffa::alloc::string::String>, + /// Indexes of the public imported files in the dependency list above. + /// /// Field 10: `public_dependency` pub public_dependency: ::buffa::alloc::vec::Vec, + /// Indexes of the weak imported files in the dependency list. + /// For Google-internal migration only. Do not use. + /// /// Field 11: `weak_dependency` pub weak_dependency: ::buffa::alloc::vec::Vec, + /// Names of files imported by this file purely for the purpose of providing + /// option extensions. These are excluded from the dependency list above. + /// /// Field 15: `option_dependency` pub option_dependency: ::buffa::alloc::vec::Vec<::buffa::alloc::string::String>, + /// All top-level definitions in this file. + /// /// Field 4: `message_type` pub message_type: ::buffa::alloc::vec::Vec, /// Field 5: `enum_type` @@ -267,10 +308,28 @@ pub struct FileDescriptorProto { pub extension: ::buffa::alloc::vec::Vec, /// Field 8: `options` pub options: ::buffa::MessageField, + /// This field contains optional information about the original source code. + /// You may safely remove this entire field without harming runtime + /// functionality of the descriptors -- the information is needed only by + /// development tools. + /// /// Field 9: `source_code_info` pub source_code_info: ::buffa::MessageField, + /// The syntax of the proto file. + /// The supported values are "proto2", "proto3", and "editions". + /// + /// If `edition` is present, this value must be "editions". + /// WARNING: This field should only be used by protobuf plugins or special + /// cases like the proto compiler. Other uses are discouraged and + /// developers should rely on the protoreflect APIs for their client language. 
+ /// /// Field 12: `syntax` pub syntax: Option<::buffa::alloc::string::String>, + /// The edition of the proto file. + /// WARNING: This field should only be used by protobuf plugins or special + /// cases like the proto compiler. Other uses are discouraged and + /// developers should rely on the protoreflect APIs for their client language. + /// /// Field 14: `edition` pub edition: Option, #[doc(hidden)] @@ -770,6 +829,7 @@ impl ::buffa::ExtensionSet for FileDescriptorProto { &mut self.__buffa_unknown_fields } } +/// Describes a message type. #[derive(Clone, PartialEq, Default)] pub struct DescriptorProto { /// Field 1: `name` @@ -790,8 +850,13 @@ pub struct DescriptorProto { pub options: ::buffa::MessageField, /// Field 9: `reserved_range` pub reserved_range: ::buffa::alloc::vec::Vec, + /// Reserved field names, which may not be used by fields in the same message. + /// A given name may only be reserved once. + /// /// Field 10: `reserved_name` pub reserved_name: ::buffa::alloc::vec::Vec<::buffa::alloc::string::String>, + /// Support for `export` and `local` keywords on enums. + /// /// Field 11: `visibility` pub visibility: Option, #[doc(hidden)] @@ -1191,8 +1256,12 @@ pub mod descriptor_proto { use super::*; #[derive(Clone, PartialEq, Default)] pub struct ExtensionRange { + /// Inclusive. + /// /// Field 1: `start` pub start: Option, + /// Exclusive. + /// /// Field 2: `end` pub end: Option, /// Field 3: `options` @@ -1353,10 +1422,17 @@ pub mod descriptor_proto { &mut self.__buffa_unknown_fields } } + /// Range of reserved tag numbers. Reserved tag numbers may not be used by + /// fields or extension ranges in the same message. Reserved ranges may + /// not overlap. #[derive(Clone, PartialEq, Default)] pub struct ReservedRange { + /// Inclusive. + /// /// Field 1: `start` pub start: Option, + /// Exclusive. 
+ /// /// Field 2: `end` pub end: Option, #[doc(hidden)] @@ -1487,12 +1563,24 @@ pub mod descriptor_proto { } #[derive(Clone, PartialEq, Default)] pub struct ExtensionRangeOptions { + /// The parser stores options it doesn't recognize here. See above. + /// /// Field 999: `uninterpreted_option` pub uninterpreted_option: ::buffa::alloc::vec::Vec, + /// For external users: DO NOT USE. We are in the process of open sourcing + /// extension declaration and executing internal cleanups before it can be + /// used externally. + /// /// Field 2: `declaration` pub declaration: ::buffa::alloc::vec::Vec, + /// Any features defined in the specific edition. + /// /// Field 50: `features` pub features: ::buffa::MessageField, + /// The verification state of the range. + /// TODO: flip the default to DECLARATION once all empty ranges + /// are marked as UNVERIFIED. + /// /// Field 3: `verification` pub verification: Option, #[doc(hidden)] @@ -1699,9 +1787,11 @@ impl ::buffa::ExtensionSet for ExtensionRangeOptions { pub mod extension_range_options { #[allow(unused_imports)] use super::*; + /// The verification state of the extension range. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum VerificationState { + /// All the extensions of the range must be declared. DECLARATION = 0i32, UNVERIFIED = 1i32, } @@ -1737,14 +1827,30 @@ pub mod extension_range_options { } #[derive(Clone, PartialEq, Default)] pub struct Declaration { + /// The extension number declared within the extension range. + /// /// Field 1: `number` pub number: Option, + /// The fully-qualified name of the extension field. There must be a leading + /// dot in front of the full name. + /// /// Field 2: `full_name` pub full_name: Option<::buffa::alloc::string::String>, + /// The fully-qualified type name of the extension field. Unlike + /// Metadata.type, Declaration.type must have a leading dot for messages + /// and enums. 
+ /// /// Field 3: `type` pub r#type: Option<::buffa::alloc::string::String>, + /// If true, indicates that the number is reserved in the extension range, + /// and any extension field with the number will fail to compile. Set this + /// when a declared extension field is deleted. + /// /// Field 5: `reserved` pub reserved: Option, + /// If true, indicates that the extension must be defined as repeated. + /// Otherwise the extension must be defined as optional. + /// /// Field 6: `repeated` pub repeated: Option, #[doc(hidden)] @@ -1948,6 +2054,7 @@ pub mod extension_range_options { } } } +/// Describes a field within a message. #[derive(Clone, PartialEq, Default)] pub struct FieldDescriptorProto { /// Field 1: `name` @@ -1956,20 +2063,67 @@ pub struct FieldDescriptorProto { pub number: Option, /// Field 4: `label` pub label: Option, + /// If type_name is set, this need not be set. If both this and type_name + /// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + /// /// Field 5: `type` pub r#type: Option, + /// For message and enum types, this is the name of the type. If the name + /// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + /// rules are used to find the type (i.e. first the nested types within this + /// message are searched, then within the parent, on up to the root + /// namespace). + /// /// Field 6: `type_name` pub type_name: Option<::buffa::alloc::string::String>, + /// For extensions, this is the name of the type being extended. It is + /// resolved in the same manner as type_name. + /// /// Field 2: `extendee` pub extendee: Option<::buffa::alloc::string::String>, + /// For numeric types, contains the original text representation of the value. + /// For booleans, "true" or "false". + /// For strings, contains the default text contents (not escaped in any way). + /// For bytes, contains the C escaped value. All bytes >= 128 are escaped. 
+ /// /// Field 7: `default_value` pub default_value: Option<::buffa::alloc::string::String>, + /// If set, gives the index of a oneof in the containing type's oneof_decl + /// list. This field is a member of that oneof. + /// /// Field 9: `oneof_index` pub oneof_index: Option, + /// JSON name of this field. The value is set by protocol compiler. If the + /// user has set a "json_name" option on this field, that option's value + /// will be used. Otherwise, it's deduced from the field's name by converting + /// it to camelCase. + /// /// Field 10: `json_name` pub json_name: Option<::buffa::alloc::string::String>, /// Field 8: `options` pub options: ::buffa::MessageField, + /// If true, this is a proto3 "optional". When a proto3 field is optional, it + /// tracks presence regardless of field type. + /// + /// When proto3_optional is true, this field must belong to a oneof to signal + /// to old proto3 clients that presence is tracked for this field. This oneof + /// is known as a "synthetic" oneof, and this field must be its sole member + /// (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + /// exist in the descriptor only, and do not generate any API. Synthetic oneofs + /// must be ordered after all "real" oneofs. + /// + /// For message fields, proto3_optional doesn't create any semantic change, + /// since non-repeated message fields always track presence. However it still + /// indicates the semantic detail of whether the user wrote "optional" or not. + /// This can be useful for round-tripping the .proto file. For consistency we + /// give message fields a synthetic oneof also, even though it is not required + /// to track presence. This is especially important because the parser can't + /// tell if a field is a message or an enum, so it must always create a + /// synthetic oneof. + /// + /// Proto2 optional fields do not set this flag, because they already indicate + /// optional with `LABEL_OPTIONAL`. 
+ /// /// Field 17: `proto3_optional` pub proto3_optional: Option, #[doc(hidden)] @@ -2356,23 +2510,38 @@ pub mod field_descriptor_proto { #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum Type { + /// 0 is reserved for errors. + /// Order is weird for historical reasons. TYPE_DOUBLE = 1i32, TYPE_FLOAT = 2i32, + /// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + /// negative values are likely. TYPE_INT64 = 3i32, TYPE_UINT64 = 4i32, + /// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + /// negative values are likely. TYPE_INT32 = 5i32, TYPE_FIXED64 = 6i32, TYPE_FIXED32 = 7i32, TYPE_BOOL = 8i32, TYPE_STRING = 9i32, + /// Tag-delimited aggregate. + /// Group type is deprecated and not supported after google.protobuf. However, Proto3 + /// implementations should still be able to parse the group wire format and + /// treat group fields as unknown fields. In Editions, the group wire format + /// can be enabled via the `message_encoding` feature. TYPE_GROUP = 10i32, + /// Length-delimited aggregate. TYPE_MESSAGE = 11i32, + /// New in version 2. TYPE_BYTES = 12i32, TYPE_UINT32 = 13i32, TYPE_ENUM = 14i32, TYPE_SFIXED32 = 15i32, TYPE_SFIXED64 = 16i32, + /// Uses ZigZag encoding. TYPE_SINT32 = 17i32, + /// Uses ZigZag encoding. TYPE_SINT64 = 18i32, } impl ::core::default::Default for Type { @@ -2456,8 +2625,12 @@ pub mod field_descriptor_proto { #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum Label { + /// 0 is reserved for errors LABEL_OPTIONAL = 1i32, LABEL_REPEATED = 3i32, + /// The required label is only allowed in google.protobuf. In proto3 and Editions + /// it's explicitly prohibited. In Editions, the `field_presence` feature + /// can be used to get this behavior. LABEL_REQUIRED = 2i32, } impl ::core::default::Default for Label { @@ -2494,6 +2667,7 @@ pub mod field_descriptor_proto { } } } +/// Describes a oneof. 
#[derive(Clone, PartialEq, Default)] pub struct OneofDescriptorProto { /// Field 1: `name` @@ -2638,6 +2812,7 @@ impl ::buffa::ExtensionSet for OneofDescriptorProto { &mut self.__buffa_unknown_fields } } +/// Describes an enum type. #[derive(Clone, PartialEq, Default)] pub struct EnumDescriptorProto { /// Field 1: `name` @@ -2646,12 +2821,21 @@ pub struct EnumDescriptorProto { pub value: ::buffa::alloc::vec::Vec, /// Field 3: `options` pub options: ::buffa::MessageField, + /// Range of reserved numeric values. Reserved numeric values may not be used + /// by enum values in the same enum declaration. Reserved ranges may not + /// overlap. + /// /// Field 4: `reserved_range` pub reserved_range: ::buffa::alloc::vec::Vec< enum_descriptor_proto::EnumReservedRange, >, + /// Reserved enum value names, which may not be reused. A given name may only + /// be reserved once. + /// /// Field 5: `reserved_name` pub reserved_name: ::buffa::alloc::vec::Vec<::buffa::alloc::string::String>, + /// Support for `export` and `local` keywords on enums. + /// /// Field 6: `visibility` pub visibility: Option, #[doc(hidden)] @@ -2907,10 +3091,20 @@ impl ::buffa::ExtensionSet for EnumDescriptorProto { pub mod enum_descriptor_proto { #[allow(unused_imports)] use super::*; + /// Range of reserved numeric values. Reserved values may not be used by + /// entries in the same enum. Reserved ranges may not overlap. + /// + /// Note that this is distinct from DescriptorProto.ReservedRange in that it + /// is inclusive such that it can appropriately represent the entire int32 + /// domain. #[derive(Clone, PartialEq, Default)] pub struct EnumReservedRange { + /// Inclusive. + /// /// Field 1: `start` pub start: Option, + /// Inclusive. + /// /// Field 2: `end` pub end: Option, #[doc(hidden)] @@ -3039,6 +3233,7 @@ pub mod enum_descriptor_proto { } } } +/// Describes a value within an enum. 
#[derive(Clone, PartialEq, Default)] pub struct EnumValueDescriptorProto { /// Field 1: `name` @@ -3207,6 +3402,7 @@ impl ::buffa::ExtensionSet for EnumValueDescriptorProto { &mut self.__buffa_unknown_fields } } +/// Describes a service. #[derive(Clone, PartialEq, Default)] pub struct ServiceDescriptorProto { /// Field 1: `name` @@ -3382,18 +3578,26 @@ impl ::buffa::ExtensionSet for ServiceDescriptorProto { &mut self.__buffa_unknown_fields } } +/// Describes a method of a service. #[derive(Clone, PartialEq, Default)] pub struct MethodDescriptorProto { /// Field 1: `name` pub name: Option<::buffa::alloc::string::String>, + /// Input and output type names. These are resolved in the same way as + /// FieldDescriptorProto.type_name, but must refer to a message type. + /// /// Field 2: `input_type` pub input_type: Option<::buffa::alloc::string::String>, /// Field 3: `output_type` pub output_type: Option<::buffa::alloc::string::String>, /// Field 4: `options` pub options: ::buffa::MessageField, + /// Identifies if client streams multiple client messages + /// /// Field 5: `client_streaming` pub client_streaming: Option, + /// Identifies if server streams multiple server messages + /// /// Field 6: `server_streaming` pub server_streaming: Option, #[doc(hidden)] @@ -3634,48 +3838,168 @@ impl ::buffa::ExtensionSet for MethodDescriptorProto { &mut self.__buffa_unknown_fields } } +/// =================================================================== +/// Options +/// +/// Each of the definitions above may have "options" attached. These are +/// just annotations which may cause code to be generated slightly differently +/// or may contain hints for code that manipulates protocol messages. +/// +/// Clients may define custom options as extensions of the *Options messages. +/// These extensions may not yet be known at parsing time, so the parser cannot +/// store the values in them. Instead it stores them in a field in the *Options +/// message called uninterpreted_option. 
This field must have the same name +/// across all *Options messages. We then use this field to populate the +/// extensions when we build a descriptor, at which point all protos have been +/// parsed and so all extensions are known. +/// +/// Extension numbers for custom options may be chosen as follows: +/// * For options which will only be used within a single application or +/// organization, or for experimental options, use field numbers 50000 +/// through 99999. It is up to you to ensure that you do not use the +/// same number for multiple options. +/// * For options which will be published and used publicly by multiple +/// independent entities, e-mail protobuf-global-extension-registry@google.com +/// to reserve extension numbers. Simply provide your project name (e.g. +/// Objective-C plugin) and your project website (if available) -- there's no +/// need to explain how you intend to use them. Usually you only need one +/// extension number. You can declare multiple options with only one extension +/// number by putting them in a sub-message. See the Custom Options section of +/// the docs for examples: +/// https://developers.google.com/protocol-buffers/docs/proto#options +/// If this turns out to be popular, a web service will be set up +/// to automatically assign option numbers. #[derive(Clone, PartialEq, Default)] pub struct FileOptions { + /// Sets the Java package where classes generated from this .proto will be + /// placed. By default, the proto package is used, but this is often + /// inappropriate because proto packages do not normally start with backwards + /// domain names. + /// /// Field 1: `java_package` pub java_package: Option<::buffa::alloc::string::String>, + /// Controls the name of the wrapper Java class generated for the .proto file. + /// That class will always contain the .proto file's getDescriptor() method as + /// well as any top-level extensions defined in the .proto file. 
+ /// If java_multiple_files is disabled, then all the other classes from the + /// .proto file will be nested inside the single wrapper outer class. + /// /// Field 8: `java_outer_classname` pub java_outer_classname: Option<::buffa::alloc::string::String>, + /// If enabled, then the Java code generator will generate a separate .java + /// file for each top-level message, enum, and service defined in the .proto + /// file. Thus, these types will *not* be nested inside the wrapper class + /// named by java_outer_classname. However, the wrapper class will still be + /// generated to contain the file's getDescriptor() method as well as any + /// top-level extensions defined in the file. + /// /// Field 10: `java_multiple_files` pub java_multiple_files: Option, + /// This option does nothing. + /// /// Field 20: `java_generate_equals_and_hash` pub java_generate_equals_and_hash: Option, + /// A proto2 file can set this to true to opt in to UTF-8 checking for Java, + /// which will throw an exception if invalid UTF-8 is parsed from the wire or + /// assigned to a string field. + /// + /// TODO: clarify exactly what kinds of field types this option + /// applies to, and update these docs accordingly. + /// + /// Proto3 files already perform these checks. Setting the option explicitly to + /// false has no effect: it cannot be used to opt proto3 files out of UTF-8 + /// checks. + /// /// Field 27: `java_string_check_utf8` pub java_string_check_utf8: Option, /// Field 9: `optimize_for` pub optimize_for: Option, + /// Sets the Go package where structs generated from this .proto will be + /// placed. If omitted, the Go package will be derived from the following: + /// - The basename of the package import path, if provided. + /// - Otherwise, the package statement in the .proto file, if present. + /// - Otherwise, the basename of the .proto file, without extension. 
+ /// /// Field 11: `go_package` pub go_package: Option<::buffa::alloc::string::String>, + /// Should generic services be generated in each language? "Generic" services + /// are not specific to any particular RPC system. They are generated by the + /// main code generators in each language (without additional plugins). + /// Generic services were the only kind of service generation supported by + /// early versions of google.protobuf. + /// + /// Generic services are now considered deprecated in favor of using plugins + /// that generate code specific to your particular RPC system. Therefore, + /// these default to false. Old code which depends on generic services should + /// explicitly set them to true. + /// /// Field 16: `cc_generic_services` pub cc_generic_services: Option, /// Field 17: `java_generic_services` pub java_generic_services: Option, /// Field 18: `py_generic_services` pub py_generic_services: Option, + /// Is this file deprecated? + /// Depending on the target platform, this can emit Deprecated annotations + /// for everything in the file, or it will be completely ignored; in the very + /// least, this is a formalization for deprecating files. + /// /// Field 23: `deprecated` pub deprecated: Option, + /// Enables the use of arenas for the proto messages in this file. This applies + /// only to generated classes for C++. + /// /// Field 31: `cc_enable_arenas` pub cc_enable_arenas: Option, + /// Sets the objective c class prefix which is prepended to all objective c + /// generated classes from this .proto. There is no default. + /// /// Field 36: `objc_class_prefix` pub objc_class_prefix: Option<::buffa::alloc::string::String>, + /// Namespace for generated classes; defaults to the package. + /// /// Field 37: `csharp_namespace` pub csharp_namespace: Option<::buffa::alloc::string::String>, + /// By default Swift generators will take the proto package and CamelCase it + /// replacing '.' 
with underscore and use that to prefix the types/symbols + /// defined. When this options is provided, they will use this value instead + /// to prefix the types/symbols defined. + /// /// Field 39: `swift_prefix` pub swift_prefix: Option<::buffa::alloc::string::String>, + /// Sets the php class prefix which is prepended to all php generated classes + /// from this .proto. Default is empty. + /// /// Field 40: `php_class_prefix` pub php_class_prefix: Option<::buffa::alloc::string::String>, + /// Use this option to change the namespace of php generated classes. Default + /// is empty. When this option is empty, the package name will be used for + /// determining the namespace. + /// /// Field 41: `php_namespace` pub php_namespace: Option<::buffa::alloc::string::String>, + /// Use this option to change the namespace of php generated metadata classes. + /// Default is empty. When this option is empty, the proto file name will be + /// used for determining the namespace. + /// /// Field 44: `php_metadata_namespace` pub php_metadata_namespace: Option<::buffa::alloc::string::String>, + /// Use this option to change the package of ruby generated classes. Default + /// is empty. When this option is not set, the package name will be used for + /// determining the ruby package. + /// /// Field 45: `ruby_package` pub ruby_package: Option<::buffa::alloc::string::String>, + /// Any features defined in the specific edition. + /// WARNING: This field should only be used by protobuf plugins or special + /// cases like the proto compiler. Other uses are discouraged and + /// developers should rely on the protoreflect APIs for their client language. + /// /// Field 50: `features` pub features: ::buffa::MessageField, + /// The parser stores options it doesn't recognize here. + /// See the documentation for the "Options" section above. 
+    ///
     /// Field 999: `uninterpreted_option`
     pub uninterpreted_option: ::buffa::alloc::vec::Vec<UninterpretedOption>,
     #[doc(hidden)]
@@ -4306,11 +4630,17 @@ impl ::buffa::ExtensionSet for FileOptions {
 pub mod file_options {
     #[allow(unused_imports)]
     use super::*;
+    /// Generated classes can be optimized for speed or code size.
     #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
     #[repr(i32)]
     pub enum OptimizeMode {
+        /// Generate complete code for parsing, serialization,
+        /// etc.
         SPEED = 1i32,
+        ///
+        /// Use ReflectionOps to implement these methods.
         CODE_SIZE = 2i32,
+        /// Generate code using MessageLite and the lite runtime.
         LITE_RUNTIME = 3i32,
     }
     impl ::core::default::Default for OptimizeMode {
@@ -4349,18 +4679,92 @@ pub mod file_options {
 }
 #[derive(Clone, PartialEq, Default)]
 pub struct MessageOptions {
+    /// Set true to use the old proto1 MessageSet wire format for extensions.
+    /// This is provided for backwards-compatibility with the MessageSet wire
+    /// format. You should not use this for any other reason: It's less
+    /// efficient, has fewer features, and is more complicated.
+    ///
+    /// The message must be defined exactly as follows:
+    /// message Foo {
+    /// ```text
+    /// option message_set_wire_format = true;
+    /// extensions 4 to max;
+    /// ```
+    /// }
+    /// Note that the message cannot have any defined fields; MessageSets only
+    /// have extensions.
+    ///
+    /// All extensions of your type must be singular messages; e.g. they cannot
+    /// be int32s, enums, or repeated messages.
+    ///
+    /// Because this is an option, the above two restrictions are not enforced by
+    /// the protocol compiler.
+    ///
     /// Field 1: `message_set_wire_format`
     pub message_set_wire_format: Option<bool>,
+    /// Disables the generation of the standard "descriptor()" accessor, which can
+    /// conflict with a field of the same name. This is meant to make migration
+    /// from proto1 easier; new code should avoid fields named "descriptor".
+ /// /// Field 2: `no_standard_descriptor_accessor` pub no_standard_descriptor_accessor: Option, + /// Is this message deprecated? + /// Depending on the target platform, this can emit Deprecated annotations + /// for the message, or it will be completely ignored; in the very least, + /// this is a formalization for deprecating messages. + /// /// Field 3: `deprecated` pub deprecated: Option, + /// Whether the message is an automatically generated map entry type for the + /// maps field. + /// + /// For maps fields: + /// ```text + /// map map_field = 1; + /// ``` + /// The parsed descriptor looks like: + /// ```text + /// message MapFieldEntry { + /// option map_entry = true; + /// optional KeyType key = 1; + /// optional ValueType value = 2; + /// } + /// repeated MapFieldEntry map_field = 1; + /// ``` + /// + /// Implementations may choose not to generate the map_entry=true message, but + /// use a native map in the target language to hold the keys and values. + /// The reflection APIs in such implementations still need to work as + /// if the field is a repeated message field. + /// + /// NOTE: Do not set the option in .proto files. Always use the maps syntax + /// instead. The option should only be implicitly set by the proto compiler + /// parser. + /// /// Field 7: `map_entry` pub map_entry: Option, + /// Enable the legacy handling of JSON field name conflicts. This lowercases + /// and strips underscored from the fields before comparison in proto3 only. + /// The new behavior takes `json_name` into account and applies to proto2 as + /// well. + /// + /// This should only be used as a temporary measure against broken builds due + /// to the change in behavior for JSON field name conflicts. + /// + /// TODO This is legacy behavior we plan to remove once downstream + /// teams have had time to migrate. 
+ /// /// Field 11: `deprecated_legacy_json_field_conflicts` pub deprecated_legacy_json_field_conflicts: Option, + /// Any features defined in the specific edition. + /// WARNING: This field should only be used by protobuf plugins or special + /// cases like the proto compiler. Other uses are discouraged and + /// developers should rely on the protoreflect APIs for their client language. + /// /// Field 12: `features` pub features: ::buffa::MessageField, + /// The parser stores options it doesn't recognize here. See above. + /// /// Field 999: `uninterpreted_option` pub uninterpreted_option: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -4619,20 +5023,86 @@ impl ::buffa::ExtensionSet for MessageOptions { } #[derive(Clone, PartialEq, Default)] pub struct FieldOptions { + /// NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead. + /// The ctype option instructs the C++ code generator to use a different + /// representation of the field than it normally would. See the specific + /// options below. This option is only implemented to support use of + /// [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + /// type "bytes" in the open source release. + /// TODO: make ctype actually deprecated. + /// /// Field 1: `ctype` pub ctype: Option, + /// The packed option can be enabled for repeated primitive fields to enable + /// a more efficient representation on the wire. Rather than repeatedly + /// writing the tag and type for each element, the entire array is encoded as + /// a single length-delimited blob. In proto3, only explicit setting it to + /// false will avoid using packed encoding. This option is prohibited in + /// Editions, but the `repeated_field_encoding` feature can be used to control + /// the behavior. + /// /// Field 2: `packed` pub packed: Option, + /// The jstype option determines the JavaScript type used for values of the + /// field. 
The option is permitted only for 64 bit integral and fixed types + /// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + /// is represented as JavaScript string, which avoids loss of precision that + /// can happen when a large value is converted to a floating point JavaScript. + /// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + /// use the JavaScript "number" type. The behavior of the default option + /// JS_NORMAL is implementation dependent. + /// + /// This option is an enum to permit additional types to be added, e.g. + /// goog.math.Integer. + /// /// Field 6: `jstype` pub jstype: Option, + /// Should this field be parsed lazily? Lazy applies only to message-type + /// fields. It means that when the outer message is initially parsed, the + /// inner message's contents will not be parsed but instead stored in encoded + /// form. The inner message will actually be parsed when it is first accessed. + /// + /// This is only a hint. Implementations are free to choose whether to use + /// eager or lazy parsing regardless of the value of this option. However, + /// setting this option true suggests that the protocol author believes that + /// using lazy parsing on this field is worth the additional bookkeeping + /// overhead typically needed to implement it. + /// + /// This option does not affect the public interface of any generated code; + /// all method signatures remain the same. Furthermore, thread-safety of the + /// interface is not affected by this option; const methods remain safe to + /// call from multiple threads concurrently, while non-const methods continue + /// to require exclusive access. + /// + /// Note that lazy message fields are still eagerly verified to check + /// ill-formed wireformat or missing required fields. Calling IsInitialized() + /// on the outer message would fail if the inner message has missing required + /// fields. 
Failed verification would result in parsing failure (except when + /// uninitialized messages are acceptable). + /// /// Field 5: `lazy` pub lazy: Option, + /// unverified_lazy does no correctness checks on the byte stream. This should + /// only be used where lazy with verification is prohibitive for performance + /// reasons. + /// /// Field 15: `unverified_lazy` pub unverified_lazy: Option, + /// Is this field deprecated? + /// Depending on the target platform, this can emit Deprecated annotations + /// for accessors, or it will be completely ignored; in the very least, this + /// is a formalization for deprecating fields. + /// /// Field 3: `deprecated` pub deprecated: Option, + /// DEPRECATED. DO NOT USE! + /// For Google-internal migration only. Do not use. + /// /// Field 10: `weak` pub weak: Option, + /// Indicate that the field value should not be printed out when using debug + /// formats, e.g. when the field contains sensitive credentials. + /// /// Field 16: `debug_redact` pub debug_redact: Option, /// Field 17: `retention` @@ -4641,10 +5111,17 @@ pub struct FieldOptions { pub targets: ::buffa::alloc::vec::Vec, /// Field 20: `edition_defaults` pub edition_defaults: ::buffa::alloc::vec::Vec, + /// Any features defined in the specific edition. + /// WARNING: This field should only be used by protobuf plugins or special + /// cases like the proto compiler. Other uses are discouraged and + /// developers should rely on the protoreflect APIs for their client language. + /// /// Field 21: `features` pub features: ::buffa::MessageField, /// Field 22: `feature_support` pub feature_support: ::buffa::MessageField, + /// The parser stores options it doesn't recognize here. See above. + /// /// Field 999: `uninterpreted_option` pub uninterpreted_option: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -5140,7 +5617,14 @@ pub mod field_options { #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum CType { + /// Default mode. 
STRING = 0i32, + /// The option [ctype=CORD] may be applied to a non-repeated field of type + /// "bytes". It indicates that in C++, the data should be stored in a Cord + /// instead of a string. For very large strings, this may reduce memory + /// fragmentation. It may also allow better performance when parsing from a + /// Cord, or when parsing with aliasing enabled, as the parsed Cord may then + /// alias the original buffer. CORD = 1i32, STRING_PIECE = 2i32, } @@ -5180,8 +5664,11 @@ pub mod field_options { #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum JSType { + /// Use the default type. JS_NORMAL = 0i32, + /// Use JavaScript strings. JS_STRING = 1i32, + /// Use JavaScript numbers. JS_NUMBER = 2i32, } impl ::core::default::Default for JSType { @@ -5217,6 +5704,7 @@ pub mod field_options { } } } + /// If set to RETENTION_SOURCE, the option will be omitted from the binary. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum OptionRetention { @@ -5263,6 +5751,9 @@ pub mod field_options { } } } + /// This indicates the types of entities that the field may apply to when used + /// as an option. If it is unset, then the field may be freely used as an + /// option on any kind of entity. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum OptionTargetType { @@ -5355,6 +5846,8 @@ pub mod field_options { pub struct EditionDefault { /// Field 3: `edition` pub edition: Option, + /// Textproto value. + /// /// Field 2: `value` pub value: Option<::buffa::alloc::string::String>, #[doc(hidden)] @@ -5497,14 +5990,29 @@ pub mod field_options { &mut self.__buffa_unknown_fields } } + /// Information about the support window of a feature. #[derive(Clone, PartialEq, Default)] pub struct FeatureSupport { + /// The edition that this feature was first available in. In editions + /// earlier than this one, the default assigned to EDITION_LEGACY will be + /// used, and proto files will not be able to override it. 
+ /// /// Field 1: `edition_introduced` pub edition_introduced: Option, + /// The edition this feature becomes deprecated in. Using this after this + /// edition may trigger warnings. + /// /// Field 2: `edition_deprecated` pub edition_deprecated: Option, + /// The deprecation warning text if this feature is used after the edition it + /// was marked deprecated in. + /// /// Field 3: `deprecation_warning` pub deprecation_warning: Option<::buffa::alloc::string::String>, + /// The edition this feature is no longer available in. In editions after + /// this one, the last default assigned will be used, and proto files will + /// not be able to override it. + /// /// Field 4: `edition_removed` pub edition_removed: Option, #[doc(hidden)] @@ -5712,8 +6220,15 @@ pub mod field_options { } #[derive(Clone, PartialEq, Default)] pub struct OneofOptions { + /// Any features defined in the specific edition. + /// WARNING: This field should only be used by protobuf plugins or special + /// cases like the proto compiler. Other uses are discouraged and + /// developers should rely on the protoreflect APIs for their client language. + /// /// Field 1: `features` pub features: ::buffa::MessageField, + /// The parser stores options it doesn't recognize here. See above. + /// /// Field 999: `uninterpreted_option` pub uninterpreted_option: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -5856,14 +6371,36 @@ impl ::buffa::ExtensionSet for OneofOptions { } #[derive(Clone, PartialEq, Default)] pub struct EnumOptions { + /// Set this option to true to allow mapping different tag names to the same + /// value. + /// /// Field 2: `allow_alias` pub allow_alias: Option, + /// Is this enum deprecated? + /// Depending on the target platform, this can emit Deprecated annotations + /// for the enum, or it will be completely ignored; in the very least, this + /// is a formalization for deprecating enums. 
+ /// /// Field 3: `deprecated` pub deprecated: Option, + /// Enable the legacy handling of JSON field name conflicts. This lowercases + /// and strips underscored from the fields before comparison in proto3 only. + /// The new behavior takes `json_name` into account and applies to proto2 as + /// well. + /// TODO Remove this legacy behavior once downstream teams have + /// had time to migrate. + /// /// Field 6: `deprecated_legacy_json_field_conflicts` pub deprecated_legacy_json_field_conflicts: Option, + /// Any features defined in the specific edition. + /// WARNING: This field should only be used by protobuf plugins or special + /// cases like the proto compiler. Other uses are discouraged and + /// developers should rely on the protoreflect APIs for their client language. + /// /// Field 7: `features` pub features: ::buffa::MessageField, + /// The parser stores options it doesn't recognize here. See above. + /// /// Field 999: `uninterpreted_option` pub uninterpreted_option: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -6075,14 +6612,32 @@ impl ::buffa::ExtensionSet for EnumOptions { } #[derive(Clone, PartialEq, Default)] pub struct EnumValueOptions { + /// Is this enum value deprecated? + /// Depending on the target platform, this can emit Deprecated annotations + /// for the enum value, or it will be completely ignored; in the very least, + /// this is a formalization for deprecating enum values. + /// /// Field 1: `deprecated` pub deprecated: Option, + /// Any features defined in the specific edition. + /// WARNING: This field should only be used by protobuf plugins or special + /// cases like the proto compiler. Other uses are discouraged and + /// developers should rely on the protoreflect APIs for their client language. + /// /// Field 2: `features` pub features: ::buffa::MessageField, + /// Indicate that fields annotated with this enum value should not be printed + /// out when using debug formats, e.g. 
when the field contains sensitive + /// credentials. + /// /// Field 3: `debug_redact` pub debug_redact: Option, + /// Information about the support window of a feature value. + /// /// Field 4: `feature_support` pub feature_support: ::buffa::MessageField, + /// The parser stores options it doesn't recognize here. See above. + /// /// Field 999: `uninterpreted_option` pub uninterpreted_option: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -6304,10 +6859,27 @@ impl ::buffa::ExtensionSet for EnumValueOptions { } #[derive(Clone, PartialEq, Default)] pub struct ServiceOptions { + /// Any features defined in the specific edition. + /// WARNING: This field should only be used by protobuf plugins or special + /// cases like the proto compiler. Other uses are discouraged and + /// developers should rely on the protoreflect APIs for their client language. + /// /// Field 34: `features` pub features: ::buffa::MessageField, + /// Note: Field numbers 1 through 32 are reserved for Google's internal RPC + /// framework. We apologize for hoarding these numbers to ourselves, but + /// we were already using them long before we decided to release Protocol + /// Buffers. + /// + /// Is this service deprecated? + /// Depending on the target platform, this can emit Deprecated annotations + /// for the service, or it will be completely ignored; in the very least, + /// this is a formalization for deprecating services. + /// /// Field 33: `deprecated` pub deprecated: Option, + /// The parser stores options it doesn't recognize here. See above. + /// /// Field 999: `uninterpreted_option` pub uninterpreted_option: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -6472,12 +7044,29 @@ impl ::buffa::ExtensionSet for ServiceOptions { } #[derive(Clone, PartialEq, Default)] pub struct MethodOptions { + /// Note: Field numbers 1 through 32 are reserved for Google's internal RPC + /// framework. 
We apologize for hoarding these numbers to ourselves, but + /// we were already using them long before we decided to release Protocol + /// Buffers. + /// + /// Is this method deprecated? + /// Depending on the target platform, this can emit Deprecated annotations + /// for the method, or it will be completely ignored; in the very least, + /// this is a formalization for deprecating methods. + /// /// Field 33: `deprecated` pub deprecated: Option, /// Field 34: `idempotency_level` pub idempotency_level: Option, + /// Any features defined in the specific edition. + /// WARNING: This field should only be used by protobuf plugins or special + /// cases like the proto compiler. Other uses are discouraged and + /// developers should rely on the protoreflect APIs for their client language. + /// /// Field 35: `features` pub features: ::buffa::MessageField, + /// The parser stores options it doesn't recognize here. See above. + /// /// Field 999: `uninterpreted_option` pub uninterpreted_option: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -6674,11 +7263,16 @@ impl ::buffa::ExtensionSet for MethodOptions { pub mod method_options { #[allow(unused_imports)] use super::*; + /// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + /// or neither? HTTP based RPC implementation may choose GET verb for safe + /// methods, and PUT verb for idempotent methods instead of the default POST. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum IdempotencyLevel { IDEMPOTENCY_UNKNOWN = 0i32, + /// implies idempotent NO_SIDE_EFFECTS = 1i32, + /// idempotent, but may have side effects IDEMPOTENT = 2i32, } impl ::core::default::Default for IdempotencyLevel { @@ -6717,10 +7311,19 @@ pub mod method_options { } } } +/// A message representing a option the parser does not recognize. This only +/// appears in options protos created by the compiler::Parser class. +/// DescriptorPool resolves these when building Descriptor objects. 
Therefore, +/// options protos in descriptor objects (e.g. returned by Descriptor::options(), +/// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +/// in them. #[derive(Clone, PartialEq, Default)] pub struct UninterpretedOption { /// Field 2: `name` pub name: ::buffa::alloc::vec::Vec, + /// The value of the uninterpreted option, in whatever type the tokenizer + /// identified it as during parsing. Exactly one of these should be set. + /// /// Field 3: `identifier_value` pub identifier_value: Option<::buffa::alloc::string::String>, /// Field 4: `positive_int_value` @@ -6994,6 +7597,11 @@ impl ::buffa::ExtensionSet for UninterpretedOption { pub mod uninterpreted_option { #[allow(unused_imports)] use super::*; + /// The name of the uninterpreted option. Each string represents a segment in + /// a dot-separated name. is_extension is true iff a segment represents an + /// extension (denoted with parentheses in options specs in .proto files). + /// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents + /// "foo.(bar.baz).moo". #[derive(Clone, PartialEq, Default)] pub struct NamePart { /// Field 1: `name_part` @@ -7114,6 +7722,15 @@ pub mod uninterpreted_option { } } } +/// =================================================================== +/// Features +/// +/// TODO Enums in C++ gencode (and potentially other languages) are +/// not well scoped. This means that each of the feature enums below can clash +/// with each other. The short names we've chosen maximize call-site +/// readability, but leave us very open to this scenario. A future feature will +/// be designed and implemented to handle this, hopefully before we ever hit a +/// conflict here. 
#[derive(Clone, PartialEq, Default)] pub struct FeatureSet { /// Field 1: `field_presence` @@ -7856,9 +8473,15 @@ pub mod feature_set { #[repr(i32)] pub enum DefaultSymbolVisibility { DEFAULT_SYMBOL_VISIBILITY_UNKNOWN = 0i32, + /// Default pre-EDITION_2024, all UNSET visibility are export. EXPORT_ALL = 1i32, + /// All top-level symbols default to export, nested default to local. EXPORT_TOP_LEVEL = 2i32, + /// All symbols default to local. LOCAL_ALL = 3i32, + /// All symbols local by default. Nested types cannot be exported. + /// With special case caveat for message { enum {} reserved 1 to max; } + /// This is the recommended setting for new protos. STRICT = 4i32, } impl ::core::default::Default for DefaultSymbolVisibility { @@ -7914,14 +8537,24 @@ pub mod feature_set { } } } +/// A compiled specification for the defaults of a set of features. These +/// messages are generated from FeatureSet extensions and can be used to seed +/// feature resolution. The resolution with this object becomes a simple search +/// for the closest matching edition, followed by proto merges. #[derive(Clone, PartialEq, Default)] pub struct FeatureSetDefaults { /// Field 1: `defaults` pub defaults: ::buffa::alloc::vec::Vec< feature_set_defaults::FeatureSetEditionDefault, >, + /// The minimum supported edition (inclusive) when this was constructed. + /// Editions before this will not have defaults. + /// /// Field 4: `minimum_edition` pub minimum_edition: Option, + /// The maximum known edition (inclusive) when this was constructed. Editions + /// after this will not have reliable defaults. + /// /// Field 5: `maximum_edition` pub maximum_edition: Option, #[doc(hidden)] @@ -8099,12 +8732,20 @@ impl ::buffa::ExtensionSet for FeatureSetDefaults { pub mod feature_set_defaults { #[allow(unused_imports)] use super::*; + /// A map from every known edition with a unique set of defaults to its + /// defaults. Not all editions may be contained here. 
For a given edition, + /// the defaults at the closest matching edition ordered at or before it should + /// be used. This field must be in strict ascending order by edition. #[derive(Clone, PartialEq, Default)] pub struct FeatureSetEditionDefault { /// Field 3: `edition` pub edition: Option, + /// Defaults of features that can be overridden in this edition. + /// /// Field 4: `overridable_features` pub overridable_features: ::buffa::MessageField, + /// Defaults of features that can't be overridden in this edition. + /// /// Field 5: `fixed_features` pub fixed_features: ::buffa::MessageField, #[doc(hidden)] @@ -8288,8 +8929,59 @@ pub mod feature_set_defaults { } } } +/// =================================================================== +/// Optional source code info +/// +/// Encapsulates information about the original source file from which a +/// FileDescriptorProto was generated. #[derive(Clone, PartialEq, Default)] pub struct SourceCodeInfo { + /// A Location identifies a piece of source code in a .proto file which + /// corresponds to a particular definition. This information is intended + /// to be useful to IDEs, code indexers, documentation generators, and similar + /// tools. + /// + /// For example, say we have a file like: + /// message Foo { + /// ```text + /// optional string foo = 1; + /// ``` + /// } + /// Let's look at just the field definition: + /// optional string foo = 1; + /// ^ ^^ ^^ ^ ^^^ + /// a bc de f ghi + /// We have the following locations: + /// span path represents + /// [a,i) [ 4, 0, 2, 0 ] The whole field definition. + /// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + /// [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + /// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + /// [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + /// + /// Notes: + /// - A location may refer to a repeated field itself (i.e. not to any + /// particular index within it). This is used whenever a set of elements are + /// logically enclosed in a single code segment. 
For example, an entire + /// extend block (possibly containing multiple extension definitions) will + /// have an outer location whose path refers to the "extensions" repeated + /// field without an index. + /// - Multiple locations may have the same path. This happens when a single + /// logical declaration is spread out across multiple places. The most + /// obvious example is the "extend" block again -- there may be multiple + /// extend blocks in the same scope, each of which will have the same path. + /// - A location's span is not always a subset of its parent's span. For + /// example, the "extendee" of an extension declaration appears at the + /// beginning of the "extend" block and is shared by all extensions within + /// the block. + /// - Just because a location's span is a subset of some other location's span + /// does not mean that it is a descendant. For example, a "group" defines + /// both a type and a field in a single declaration. Thus, the locations + /// corresponding to the type and field and their components will overlap. + /// - Code which tries to interpret locations should probably be designed to + /// ignore those that it doesn't understand, as more types of locations could + /// be recorded in the future. + /// /// Field 1: `location` pub location: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -8402,10 +9094,94 @@ pub mod source_code_info { use super::*; #[derive(Clone, PartialEq, Default)] pub struct Location { + /// Identifies which part of the FileDescriptorProto was defined at this + /// location. + /// + /// Each element is a field number or an index. They form a path from + /// the root FileDescriptorProto to the place where the definition appears. 
+ /// For example, this path: + /// [ 4, 3, 2, 7, 1 ] + /// refers to: + /// file.message_type(3) // 4, 3 + /// ```text + /// .field(7) // 2, 7 + /// .name() // 1 + /// ``` + /// This is because FileDescriptorProto.message_type has field number 4: + /// repeated DescriptorProto message_type = 4; + /// and DescriptorProto.field has field number 2: + /// repeated FieldDescriptorProto field = 2; + /// and FieldDescriptorProto.name has field number 1: + /// optional string name = 1; + /// + /// Thus, the above path gives the location of a field name. If we removed + /// the last element: + /// [ 4, 3, 2, 7 ] + /// this path refers to the whole field declaration (from the beginning + /// of the label to the terminating semicolon). + /// /// Field 1: `path` pub path: ::buffa::alloc::vec::Vec, + /// Always has exactly three or four elements: start line, start column, + /// end line (optional, otherwise assumed same as start line), end column. + /// These are packed into a single field for efficiency. Note that line + /// and column numbers are zero-based -- typically you will want to add + /// 1 to each before displaying to a user. + /// /// Field 2: `span` pub span: ::buffa::alloc::vec::Vec, + /// If this SourceCodeInfo represents a complete declaration, these are any + /// comments appearing before and after the declaration which appear to be + /// attached to the declaration. + /// + /// A series of line comments appearing on consecutive lines, with no other + /// tokens appearing on those lines, will be treated as a single comment. + /// + /// leading_detached_comments will keep paragraphs of comments that appear + /// before (but not connected to) the current element. Each paragraph, + /// separated by empty lines, will be one comment element in the repeated + /// field. + /// + /// Only the comment content is provided; comment markers (e.g. //) are + /// stripped out. 
For block comments, leading whitespace and an asterisk + /// will be stripped from the beginning of each line other than the first. + /// Newlines are included in the output. + /// + /// Examples: + /// + /// optional int32 foo = 1; // Comment attached to foo. + /// // Comment attached to bar. + /// optional int32 bar = 2; + /// + /// optional string baz = 3; + /// // Comment attached to baz. + /// // Another line attached to baz. + /// + /// // Comment attached to moo. + /// // + /// // Another line attached to moo. + /// optional double moo = 4; + /// + /// // Detached comment for corge. This is not leading or trailing comments + /// // to moo or corge because there are blank lines separating it from + /// // both. + /// + /// // Detached comment for corge paragraph 2. + /// + /// optional string corge = 5; + /// /* Block comment attached + /// ```text + /// * to corge. Leading asterisks + /// * will be removed. */ + /// ``` + /// /* Block comment attached to + /// ```text + /// * grault. */ + /// ``` + /// optional int32 grault = 6; + /// + /// // ignored detached comments. + /// /// Field 3: `leading_comments` pub leading_comments: Option<::buffa::alloc::string::String>, /// Field 4: `trailing_comments` @@ -8687,8 +9463,14 @@ pub mod source_code_info { } } } +/// Describes the relationship between generated code and its original source +/// file. A GeneratedCodeInfo message is associated with only one generated +/// source file, but may contain references to different source .proto files. #[derive(Clone, PartialEq, Default)] pub struct GeneratedCodeInfo { + /// An Annotation connects some span of text in generated code to an element + /// of its generating .proto file. + /// /// Field 1: `annotation` pub annotation: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -8806,12 +9588,24 @@ pub mod generated_code_info { use super::*; #[derive(Clone, PartialEq, Default)] pub struct Annotation { + /// Identifies the element in the original source .proto file. 
This field + /// is formatted the same as SourceCodeInfo.Location.path. + /// /// Field 1: `path` pub path: ::buffa::alloc::vec::Vec, + /// Identifies the filesystem path to the original source .proto. + /// /// Field 2: `source_file` pub source_file: Option<::buffa::alloc::string::String>, + /// Identifies the starting offset in bytes in the generated code + /// that relates to the identified object. + /// /// Field 3: `begin` pub begin: Option, + /// Identifies the ending offset in bytes in the generated code that + /// relates to the identified object. The end offset should be one past + /// the last relevant byte (so the length of the text = end - begin). + /// /// Field 4: `end` pub end: Option, /// Field 5: `semantic` @@ -9057,11 +9851,16 @@ pub mod generated_code_info { pub mod annotation { #[allow(unused_imports)] use super::*; + /// Represents the identified object's effect on the element in the original + /// .proto file. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum Semantic { + /// There is no effect or the effect is indescribable. NONE = 0i32, + /// The element is set or otherwise mutated. SET = 1i32, + /// An alias to the element is returned. ALIAS = 2i32, } impl ::core::default::Default for Semantic { diff --git a/buffa-codegen/src/lib.rs b/buffa-codegen/src/lib.rs index a564570..98090f2 100644 --- a/buffa-codegen/src/lib.rs +++ b/buffa-codegen/src/lib.rs @@ -20,6 +20,7 @@ //! //! All of that is handled upstream (by protoc, buf, or a future parser). 
+pub(crate) mod comments; pub mod context; pub(crate) mod defaults; pub(crate) mod enumeration; @@ -495,13 +496,18 @@ fn generate_file( let mut tokens = resolver.generate_use_block(); let current_package = file.package.as_deref().unwrap_or(""); let features = crate::features::for_file(file); - for enum_type in &file.enum_type { let enum_rust_name = enum_type.name.as_deref().unwrap_or(""); + let enum_fqn = if current_package.is_empty() { + enum_rust_name.to_string() + } else { + format!("{}.{}", current_package, enum_rust_name) + }; tokens.extend(enumeration::generate_enum( ctx, enum_type, enum_rust_name, + &enum_fqn, &features, &resolver, )?); diff --git a/buffa-codegen/src/message.rs b/buffa-codegen/src/message.rs index 494dbe7..eb5b721 100644 --- a/buffa-codegen/src/message.rs +++ b/buffa-codegen/src/message.rs @@ -66,7 +66,8 @@ pub fn generate_message( .iter() .map(|e| { let enum_name = e.name.as_deref().unwrap_or(""); - crate::enumeration::generate_enum(ctx, e, enum_name, features, resolver) + let enum_fqn = format!("{}.{}", proto_fqn, enum_name); + crate::enumeration::generate_enum(ctx, e, enum_name, &enum_fqn, features, resolver) }) .collect::, _>>()?; @@ -399,7 +400,7 @@ pub fn generate_message( if let Some(id) = &any_entry_ident { any_entry_paths.push(quote! { #id }); } - let non_map_nested: Vec<_> = msg + let non_map_nested: Vec<&DescriptorProto> = msg .nested_type .iter() .filter(|n| { @@ -492,7 +493,10 @@ pub fn generate_message( } }; + let message_doc = crate::comments::doc_attrs(ctx.comment(proto_fqn)); + let top_level = quote! 
{ + #message_doc #[derive(Clone, PartialEq, #derive_default)] #serde_struct_derive #arbitrary_derive @@ -1054,7 +1058,9 @@ fn generate_field( )?; let rust_name = make_field_ident(field_name); - let doc = format!(" Field {field_number}: `{field_name}`"); + let field_fqn = format!("{}.{}", proto_fqn, field_name); + let tag_line = format!("Field {field_number}: `{field_name}`"); + let doc = crate::comments::doc_attrs_with_tag(ctx.comment(&field_fqn), &tag_line); let serde_attr = if ctx.config.generate_json { serde_field_attr(ctx, field, field_name, &info, features) } else { @@ -1062,7 +1068,7 @@ fn generate_field( }; let rust_type = &info.rust_type; let tokens = quote! { - #[doc = #doc] + #doc #serde_attr pub #rust_name: #rust_type, }; diff --git a/buffa-codegen/src/oneof.rs b/buffa-codegen/src/oneof.rs index cef40d2..782da34 100644 --- a/buffa-codegen/src/oneof.rs +++ b/buffa-codegen/src/oneof.rs @@ -233,7 +233,11 @@ pub fn generate_oneof_enum( quote! {} }; + let oneof_fqn = format!("{}.{}", proto_fqn, oneof_name); + let oneof_doc = crate::comments::doc_attrs(ctx.comment(&oneof_fqn)); + Ok(quote! { + #oneof_doc #[derive(Clone, PartialEq, Debug)] #arbitrary_derive pub enum #rust_enum_ident { diff --git a/buffa-codegen/src/tests/comments.rs b/buffa-codegen/src/tests/comments.rs new file mode 100644 index 0000000..d469c8c --- /dev/null +++ b/buffa-codegen/src/tests/comments.rs @@ -0,0 +1,214 @@ +//! Tests for source code comment propagation into generated Rust code. 
+ +use super::*; +use crate::generated::descriptor::{source_code_info::Location, SourceCodeInfo}; + +fn make_location(path: Vec, leading: &str) -> Location { + Location { + path, + leading_comments: Some(leading.to_string()), + ..Default::default() + } +} + +#[test] +fn test_message_comment_in_generated_code() { + let mut file = proto3_file("commented.proto"); + file.message_type.push(DescriptorProto { + name: Some("Person".to_string()), + field: vec![make_field( + "name", + 1, + Label::LABEL_OPTIONAL, + Type::TYPE_STRING, + )], + ..Default::default() + }); + // Path [4, 0] = FileDescriptorProto.message_type[0] + let mut sci = SourceCodeInfo::default(); + sci.location.push(make_location( + vec![4, 0], + " Represents a person in the system.\n", + )); + file.source_code_info = sci.into(); + + let result = generate( + &[file], + &["commented.proto".to_string()], + &CodeGenConfig::default(), + ) + .expect("generation should succeed"); + + let content = &result[0].content; + assert!( + content.contains("Represents a person in the system."), + "message doc comment should appear in generated code, got:\n{content}" + ); +} + +#[test] +fn test_field_comment_in_generated_code() { + let mut file = proto3_file("field_comment.proto"); + file.message_type.push(DescriptorProto { + name: Some("User".to_string()), + field: vec![make_field( + "email", + 1, + Label::LABEL_OPTIONAL, + Type::TYPE_STRING, + )], + ..Default::default() + }); + // Path [4, 0, 2, 0] = message_type[0].field[0] + let mut sci = SourceCodeInfo::default(); + sci.location.push(make_location( + vec![4, 0, 2, 0], + " The user's email address.\n", + )); + file.source_code_info = sci.into(); + + let result = generate( + &[file], + &["field_comment.proto".to_string()], + &CodeGenConfig::default(), + ) + .expect("generation should succeed"); + + let content = &result[0].content; + assert!( + content.contains("The user's email address."), + "field doc comment should appear in generated code, got:\n{content}" + ); +} + 
+#[test] +fn test_enum_comment_in_generated_code() { + let mut file = proto3_file("enum_comment.proto"); + file.enum_type.push(EnumDescriptorProto { + name: Some("Color".to_string()), + value: vec![enum_value("UNSPECIFIED", 0), enum_value("RED", 1)], + ..Default::default() + }); + let mut sci = SourceCodeInfo::default(); + // Path [5, 0] = enum_type[0] + sci.location + .push(make_location(vec![5, 0], " Available colors.\n")); + // Path [5, 0, 2, 1] = enum_type[0].value[1] (RED) + sci.location + .push(make_location(vec![5, 0, 2, 1], " The color red.\n")); + file.source_code_info = sci.into(); + + let result = generate( + &[file], + &["enum_comment.proto".to_string()], + &CodeGenConfig::default(), + ) + .expect("generation should succeed"); + + let content = &result[0].content; + assert!( + content.contains("Available colors."), + "enum doc comment should appear, got:\n{content}" + ); + assert!( + content.contains("The color red."), + "enum value doc comment should appear, got:\n{content}" + ); +} + +#[test] +fn test_oneof_comment_in_generated_code() { + let mut file = proto3_file("oneof_comment.proto"); + file.message_type.push(DescriptorProto { + name: Some("Event".to_string()), + field: vec![ + { + let mut f = make_field("text", 1, Label::LABEL_OPTIONAL, Type::TYPE_STRING); + f.oneof_index = Some(0); + f + }, + { + let mut f = make_field("number", 2, Label::LABEL_OPTIONAL, Type::TYPE_INT32); + f.oneof_index = Some(0); + f + }, + ], + oneof_decl: vec![OneofDescriptorProto { + name: Some("payload".to_string()), + ..Default::default() + }], + ..Default::default() + }); + let mut sci = SourceCodeInfo::default(); + // Path [4, 0, 8, 0] = message_type[0].oneof_decl[0] + sci.location.push(make_location( + vec![4, 0, 8, 0], + " The event payload variant.\n", + )); + file.source_code_info = sci.into(); + + let result = generate( + &[file], + &["oneof_comment.proto".to_string()], + &CodeGenConfig::default(), + ) + .expect("generation should succeed"); + + let content = 
&result[0].content; + assert!( + content.contains("The event payload variant."), + "oneof doc comment should appear, got:\n{content}" + ); +} + +#[test] +fn test_no_source_code_info_still_generates() { + // Ensure we don't crash when source_code_info is absent + let mut file = proto3_file("no_sci.proto"); + file.message_type.push(DescriptorProto { + name: Some("Empty".to_string()), + ..Default::default() + }); + // No source_code_info set + + let result = generate( + &[file], + &["no_sci.proto".to_string()], + &CodeGenConfig::default(), + ); + assert!(result.is_ok(), "should generate without source_code_info"); +} + +#[test] +fn test_view_gets_same_comment_as_message() { + let mut file = proto3_file("view_comment.proto"); + file.message_type.push(DescriptorProto { + name: Some("Greeter".to_string()), + field: vec![make_field( + "name", + 1, + Label::LABEL_OPTIONAL, + Type::TYPE_STRING, + )], + ..Default::default() + }); + let mut sci = SourceCodeInfo::default(); + sci.location + .push(make_location(vec![4, 0], " A greeter message.\n")); + file.source_code_info = sci.into(); + + let config = CodeGenConfig { + generate_views: true, + ..Default::default() + }; + let result = generate(&[file], &["view_comment.proto".to_string()], &config) + .expect("generation should succeed"); + + let content = &result[0].content; + // The comment should appear on both the owned struct and the view struct + let count = content.matches("A greeter message.").count(); + assert!( + count >= 2, + "comment should appear on both Greeter and GreeterView, found {count} occurrence(s)" + ); +} diff --git a/buffa-codegen/src/tests/mod.rs b/buffa-codegen/src/tests/mod.rs index 1ac94be..2062050 100644 --- a/buffa-codegen/src/tests/mod.rs +++ b/buffa-codegen/src/tests/mod.rs @@ -36,6 +36,7 @@ pub(super) fn make_field(name: &str, number: i32, label: Label, ty: Type) -> Fie } } +mod comments; mod generation; mod json_codegen; mod naming; diff --git a/buffa-codegen/src/view.rs 
b/buffa-codegen/src/view.rs index 022f271..c664b69 100644 --- a/buffa-codegen/src/view.rs +++ b/buffa-codegen/src/view.rs @@ -91,7 +91,7 @@ pub fn generate_view( .field .iter() .filter(|f| is_supported_field_type(f.r#type.unwrap_or_default())) - .map(|f| view_struct_field(ctx, msg, f, current_package, features)) + .map(|f| view_struct_field(ctx, msg, f, current_package, proto_fqn, features)) .collect::, _>>()? .into_iter() .flatten() @@ -162,7 +162,10 @@ pub fn generate_view( #(#oneof_view_enums)* }; + let view_doc = crate::comments::doc_attrs(ctx.comment(proto_fqn)); + let top_level = quote! { + #view_doc #[derive(Clone, Debug, Default)] pub struct #view_ident<'a> { #(#direct_fields)* @@ -292,6 +295,7 @@ fn view_struct_field( msg: &DescriptorProto, field: &FieldDescriptorProto, current_package: &str, + proto_fqn: &str, features: &ResolvedFeatures, ) -> Result, CodeGenError> { // Real oneof members go into the oneof enum, not directly on the struct. @@ -305,21 +309,25 @@ fn view_struct_field( .ok_or(CodeGenError::MissingField("field.name"))?; let label = field.label.unwrap_or_default(); let is_repeated = label == Label::LABEL_REPEATED; + let field_fqn = format!("{}.{}", proto_fqn, field_name); + let proto_comment = ctx.comment(&field_fqn); if is_repeated && is_map_field(msg, field) { let ident = make_field_ident(field_name); let number = field.number.unwrap_or(0); - let doc = format!(" Field {number}: `{field_name}` (map)"); + let tag_line = format!("Field {number}: `{field_name}` (map)"); + let doc = crate::comments::doc_attrs_with_tag(proto_comment, &tag_line); let map_ty = view_map_type(ctx, msg, field, current_package, features)?; return Ok(Some(quote! 
{ - #[doc = #doc] + #doc pub #ident: #map_ty, })); } let ident = make_field_ident(field_name); let number = field.number.unwrap_or(0); - let doc = format!(" Field {number}: `{field_name}`"); + let tag_line = format!("Field {number}: `{field_name}`"); + let doc = crate::comments::doc_attrs_with_tag(proto_comment, &tag_line); let rust_type = if is_repeated { view_repeated_type(ctx, field, current_package, features)? @@ -328,7 +336,7 @@ fn view_struct_field( }; Ok(Some(quote! { - #[doc = #doc] + #doc pub #ident: #rust_type, })) } diff --git a/buffa-types/src/generated/google.protobuf.any.rs b/buffa-types/src/generated/google.protobuf.any.rs index 6a756eb..330a0ab 100644 --- a/buffa-types/src/generated/google.protobuf.any.rs +++ b/buffa-types/src/generated/google.protobuf.any.rs @@ -1,11 +1,139 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. // source: google/protobuf/any.proto +/// `Any` contains an arbitrary serialized protocol buffer message along with a +/// URL that describes the type of the serialized message. +/// +/// Protobuf library provides support to pack/unpack Any values in the form +/// of utility functions or additional generated methods of the Any type. +/// +/// Example 1: Pack and unpack a message in C++. +/// +/// ```text +/// Foo foo = ...; +/// Any any; +/// any.PackFrom(foo); +/// ... +/// if (any.UnpackTo(&foo)) { +/// ... +/// } +/// ``` +/// +/// Example 2: Pack and unpack a message in Java. +/// +/// ```text +/// Foo foo = ...; +/// Any any = Any.pack(foo); +/// ... +/// if (any.is(Foo.class)) { +/// foo = any.unpack(Foo.class); +/// } +/// // or ... +/// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +/// foo = any.unpack(Foo.getDefaultInstance()); +/// } +/// ``` +/// +/// Example 3: Pack and unpack a message in Python. +/// +/// ```text +/// foo = Foo(...) +/// any = Any() +/// any.Pack(foo) +/// ... +/// if any.Is(Foo.DESCRIPTOR): +/// any.Unpack(foo) +/// ... 
+/// ``` +/// +/// Example 4: Pack and unpack a message in Go +/// +/// ```text +/// foo := &pb.Foo{...} +/// any, err := anypb.New(foo) +/// if err != nil { +/// ... +/// } +/// ... +/// foo := &pb.Foo{} +/// if err := any.UnmarshalTo(foo); err != nil { +/// ... +/// } +/// ``` +/// +/// The pack methods provided by protobuf library will by default use +/// 'type.googleapis.com/full.type.name' as the type URL and the unpack +/// methods only use the fully qualified type name after the last '/' +/// in the type URL, for example "foo.bar.com/x/y.z" will yield type +/// name "y.z". +/// +/// JSON +/// ==== +/// The JSON representation of an `Any` value uses the regular +/// representation of the deserialized, embedded message, with an +/// additional field `@type` which contains the type URL. Example: +/// +/// ```text +/// package google.profile; +/// message Person { +/// string first_name = 1; +/// string last_name = 2; +/// } +/// +/// { +/// "@type": "type.googleapis.com/google.profile.Person", +/// "firstName": , +/// "lastName": +/// } +/// ``` +/// +/// If the embedded message type is well-known and has a custom JSON +/// representation, that representation will be embedded adding a field +/// `value` which holds the custom JSON in addition to the `@type` +/// field. Example (for message [google.protobuf.Duration][]): +/// +/// ```text +/// { +/// "@type": "type.googleapis.com/google.protobuf.Duration", +/// "value": "1.212s" +/// } +/// ``` #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct Any { + /// A URL/resource name that uniquely identifies the type of the serialized + /// protocol buffer message. This string must contain at least + /// one "/" character. The last segment of the URL's path must represent + /// the fully qualified name of the type (as in + /// `path/google.protobuf.Duration`). The name should be in a canonical form + /// (e.g., leading "." is not accepted). 
+ /// + /// In practice, teams usually precompile into the binary all types that they + /// expect it to use in the context of Any. However, for URLs which use the + /// scheme `http`, `https`, or no scheme, one can optionally set up a type + /// server that maps type URLs to message definitions as follows: + /// + /// * If no scheme is provided, `https` is assumed. + /// * An HTTP GET on the URL must yield a [google.protobuf.Type][] + /// value in binary format, or produce an error. + /// * Applications are allowed to cache lookup results based on the + /// URL, or have them precompiled into a binary to avoid any + /// lookup. Therefore, binary compatibility needs to be preserved + /// on changes to types. (Use versioned type names to manage + /// breaking changes.) + /// + /// Note: this functionality is not currently available in the official + /// protobuf release, and it is not used for type URLs beginning with + /// type.googleapis.com. As of May 2023, there are no widely used type server + /// implementations and no plans to implement one. + /// + /// Schemes other than `http`, `https` (or the empty scheme) might be + /// used with implementation specific semantics. + /// /// Field 1: `type_url` pub type_url: ::buffa::alloc::string::String, + /// Must be a valid serialized protocol buffer of the above specified type. + /// /// Field 2: `value` pub value: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -132,10 +260,138 @@ impl ::buffa::ExtensionSet for Any { &mut self.__buffa_unknown_fields } } +/// `Any` contains an arbitrary serialized protocol buffer message along with a +/// URL that describes the type of the serialized message. +/// +/// Protobuf library provides support to pack/unpack Any values in the form +/// of utility functions or additional generated methods of the Any type. +/// +/// Example 1: Pack and unpack a message in C++. +/// +/// ```text +/// Foo foo = ...; +/// Any any; +/// any.PackFrom(foo); +/// ... +/// if (any.UnpackTo(&foo)) { +/// ... 
+/// } +/// ``` +/// +/// Example 2: Pack and unpack a message in Java. +/// +/// ```text +/// Foo foo = ...; +/// Any any = Any.pack(foo); +/// ... +/// if (any.is(Foo.class)) { +/// foo = any.unpack(Foo.class); +/// } +/// // or ... +/// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +/// foo = any.unpack(Foo.getDefaultInstance()); +/// } +/// ``` +/// +/// Example 3: Pack and unpack a message in Python. +/// +/// ```text +/// foo = Foo(...) +/// any = Any() +/// any.Pack(foo) +/// ... +/// if any.Is(Foo.DESCRIPTOR): +/// any.Unpack(foo) +/// ... +/// ``` +/// +/// Example 4: Pack and unpack a message in Go +/// +/// ```text +/// foo := &pb.Foo{...} +/// any, err := anypb.New(foo) +/// if err != nil { +/// ... +/// } +/// ... +/// foo := &pb.Foo{} +/// if err := any.UnmarshalTo(foo); err != nil { +/// ... +/// } +/// ``` +/// +/// The pack methods provided by protobuf library will by default use +/// 'type.googleapis.com/full.type.name' as the type URL and the unpack +/// methods only use the fully qualified type name after the last '/' +/// in the type URL, for example "foo.bar.com/x/y.z" will yield type +/// name "y.z". +/// +/// JSON +/// ==== +/// The JSON representation of an `Any` value uses the regular +/// representation of the deserialized, embedded message, with an +/// additional field `@type` which contains the type URL. Example: +/// +/// ```text +/// package google.profile; +/// message Person { +/// string first_name = 1; +/// string last_name = 2; +/// } +/// +/// { +/// "@type": "type.googleapis.com/google.profile.Person", +/// "firstName": , +/// "lastName": +/// } +/// ``` +/// +/// If the embedded message type is well-known and has a custom JSON +/// representation, that representation will be embedded adding a field +/// `value` which holds the custom JSON in addition to the `@type` +/// field. 
Example (for message [google.protobuf.Duration][]): +/// +/// ```text +/// { +/// "@type": "type.googleapis.com/google.protobuf.Duration", +/// "value": "1.212s" +/// } +/// ``` #[derive(Clone, Debug, Default)] pub struct AnyView<'a> { + /// A URL/resource name that uniquely identifies the type of the serialized + /// protocol buffer message. This string must contain at least + /// one "/" character. The last segment of the URL's path must represent + /// the fully qualified name of the type (as in + /// `path/google.protobuf.Duration`). The name should be in a canonical form + /// (e.g., leading "." is not accepted). + /// + /// In practice, teams usually precompile into the binary all types that they + /// expect it to use in the context of Any. However, for URLs which use the + /// scheme `http`, `https`, or no scheme, one can optionally set up a type + /// server that maps type URLs to message definitions as follows: + /// + /// * If no scheme is provided, `https` is assumed. + /// * An HTTP GET on the URL must yield a [google.protobuf.Type][] + /// value in binary format, or produce an error. + /// * Applications are allowed to cache lookup results based on the + /// URL, or have them precompiled into a binary to avoid any + /// lookup. Therefore, binary compatibility needs to be preserved + /// on changes to types. (Use versioned type names to manage + /// breaking changes.) + /// + /// Note: this functionality is not currently available in the official + /// protobuf release, and it is not used for type URLs beginning with + /// type.googleapis.com. As of May 2023, there are no widely used type server + /// implementations and no plans to implement one. + /// + /// Schemes other than `http`, `https` (or the empty scheme) might be + /// used with implementation specific semantics. + /// /// Field 1: `type_url` pub type_url: &'a str, + /// Must be a valid serialized protocol buffer of the above specified type. 
+ /// /// Field 2: `value` pub value: &'a [u8], pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, diff --git a/buffa-types/src/generated/google.protobuf.duration.rs b/buffa-types/src/generated/google.protobuf.duration.rs index 429ee4c..884bba1 100644 --- a/buffa-types/src/generated/google.protobuf.duration.rs +++ b/buffa-types/src/generated/google.protobuf.duration.rs @@ -1,11 +1,86 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. // source: google/protobuf/duration.proto +/// A Duration represents a signed, fixed-length span of time represented +/// as a count of seconds and fractions of seconds at nanosecond +/// resolution. It is independent of any calendar and concepts like "day" +/// or "month". It is related to Timestamp in that the difference between +/// two Timestamp values is a Duration and it can be added or subtracted +/// from a Timestamp. Range is approximately +-10,000 years. +/// +/// # Examples +/// +/// Example 1: Compute Duration from two Timestamps in pseudo code. +/// +/// ```text +/// Timestamp start = ...; +/// Timestamp end = ...; +/// Duration duration = ...; +/// +/// duration.seconds = end.seconds - start.seconds; +/// duration.nanos = end.nanos - start.nanos; +/// +/// if (duration.seconds < 0 && duration.nanos > 0) { +/// duration.seconds += 1; +/// duration.nanos -= 1000000000; +/// } else if (duration.seconds > 0 && duration.nanos < 0) { +/// duration.seconds -= 1; +/// duration.nanos += 1000000000; +/// } +/// ``` +/// +/// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+/// +/// ```text +/// Timestamp start = ...; +/// Duration duration = ...; +/// Timestamp end = ...; +/// +/// end.seconds = start.seconds + duration.seconds; +/// end.nanos = start.nanos + duration.nanos; +/// +/// if (end.nanos < 0) { +/// end.seconds -= 1; +/// end.nanos += 1000000000; +/// } else if (end.nanos >= 1000000000) { +/// end.seconds += 1; +/// end.nanos -= 1000000000; +/// } +/// ``` +/// +/// Example 3: Compute Duration from datetime.timedelta in Python. +/// +/// ```text +/// td = datetime.timedelta(days=3, minutes=10) +/// duration = Duration() +/// duration.FromTimedelta(td) +/// ``` +/// +/// # JSON Mapping +/// +/// In JSON format, the Duration type is encoded as a string rather than an +/// object, where the string ends in the suffix "s" (indicating seconds) and +/// is preceded by the number of seconds, with nanoseconds expressed as +/// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +/// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +/// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +/// microsecond should be expressed in JSON format as "3.000001s". #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct Duration { + /// Signed seconds of the span of time. Must be from -315,576,000,000 + /// to +315,576,000,000 inclusive. Note: these bounds are computed from: + /// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + /// /// Field 1: `seconds` pub seconds: i64, + /// Signed fractions of a second at nanosecond resolution of the span + /// of time. Durations less than one second are represented with a 0 + /// `seconds` field and a positive or negative `nanos` field. For durations + /// of one second or more, a non-zero value for the `nanos` field must be + /// of the same sign as the `seconds` field. Must be from -999,999,999 + /// to +999,999,999 inclusive. 
+ /// /// Field 2: `nanos` pub nanos: i32, #[doc(hidden)] @@ -126,10 +201,85 @@ impl ::buffa::ExtensionSet for Duration { &mut self.__buffa_unknown_fields } } +/// A Duration represents a signed, fixed-length span of time represented +/// as a count of seconds and fractions of seconds at nanosecond +/// resolution. It is independent of any calendar and concepts like "day" +/// or "month". It is related to Timestamp in that the difference between +/// two Timestamp values is a Duration and it can be added or subtracted +/// from a Timestamp. Range is approximately +-10,000 years. +/// +/// # Examples +/// +/// Example 1: Compute Duration from two Timestamps in pseudo code. +/// +/// ```text +/// Timestamp start = ...; +/// Timestamp end = ...; +/// Duration duration = ...; +/// +/// duration.seconds = end.seconds - start.seconds; +/// duration.nanos = end.nanos - start.nanos; +/// +/// if (duration.seconds < 0 && duration.nanos > 0) { +/// duration.seconds += 1; +/// duration.nanos -= 1000000000; +/// } else if (duration.seconds > 0 && duration.nanos < 0) { +/// duration.seconds -= 1; +/// duration.nanos += 1000000000; +/// } +/// ``` +/// +/// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +/// +/// ```text +/// Timestamp start = ...; +/// Duration duration = ...; +/// Timestamp end = ...; +/// +/// end.seconds = start.seconds + duration.seconds; +/// end.nanos = start.nanos + duration.nanos; +/// +/// if (end.nanos < 0) { +/// end.seconds -= 1; +/// end.nanos += 1000000000; +/// } else if (end.nanos >= 1000000000) { +/// end.seconds += 1; +/// end.nanos -= 1000000000; +/// } +/// ``` +/// +/// Example 3: Compute Duration from datetime.timedelta in Python. 
+/// +/// ```text +/// td = datetime.timedelta(days=3, minutes=10) +/// duration = Duration() +/// duration.FromTimedelta(td) +/// ``` +/// +/// # JSON Mapping +/// +/// In JSON format, the Duration type is encoded as a string rather than an +/// object, where the string ends in the suffix "s" (indicating seconds) and +/// is preceded by the number of seconds, with nanoseconds expressed as +/// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +/// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +/// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +/// microsecond should be expressed in JSON format as "3.000001s". #[derive(Clone, Debug, Default)] pub struct DurationView<'a> { + /// Signed seconds of the span of time. Must be from -315,576,000,000 + /// to +315,576,000,000 inclusive. Note: these bounds are computed from: + /// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + /// /// Field 1: `seconds` pub seconds: i64, + /// Signed fractions of a second at nanosecond resolution of the span + /// of time. Durations less than one second are represented with a 0 + /// `seconds` field and a positive or negative `nanos` field. For durations + /// of one second or more, a non-zero value for the `nanos` field must be + /// of the same sign as the `seconds` field. Must be from -999,999,999 + /// to +999,999,999 inclusive. + /// /// Field 2: `nanos` pub nanos: i32, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, diff --git a/buffa-types/src/generated/google.protobuf.empty.rs b/buffa-types/src/generated/google.protobuf.empty.rs index 2c11c6e..66cf514 100644 --- a/buffa-types/src/generated/google.protobuf.empty.rs +++ b/buffa-types/src/generated/google.protobuf.empty.rs @@ -1,6 +1,15 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. 
// source: google/protobuf/empty.proto +/// A generic empty message that you can re-use to avoid defining duplicated +/// empty messages in your APIs. +/// +/// Example usage: gRPC uses google.protobuf.Empty as the input and output +/// type for RPCs defined as returning or accepting "nothing": +/// +/// ```text +/// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +/// ``` #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct Empty { @@ -81,6 +90,15 @@ impl ::buffa::ExtensionSet for Empty { &mut self.__buffa_unknown_fields } } +/// A generic empty message that you can re-use to avoid defining duplicated +/// empty messages in your APIs. +/// +/// Example usage: gRPC uses google.protobuf.Empty as the input and output +/// type for RPCs defined as returning or accepting "nothing": +/// +/// ```text +/// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +/// ``` #[derive(Clone, Debug, Default)] pub struct EmptyView<'a> { pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, diff --git a/buffa-types/src/generated/google.protobuf.field_mask.rs b/buffa-types/src/generated/google.protobuf.field_mask.rs index 1f4eb76..82c21ff 100644 --- a/buffa-types/src/generated/google.protobuf.field_mask.rs +++ b/buffa-types/src/generated/google.protobuf.field_mask.rs @@ -1,9 +1,234 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. // source: google/protobuf/field_mask.proto +/// `FieldMask` represents a set of symbolic field paths, for example: +/// +/// ```text +/// paths: "f.a" +/// paths: "f.b.d" +/// ``` +/// +/// Here `f` represents a field in some root message, `a` and `b` +/// fields in the message found in `f`, and `d` a field found in the +/// message in `f.b`. +/// +/// Field masks are used to specify a subset of fields that should be +/// returned by a get operation or modified by an update operation. +/// Field masks also have a custom JSON encoding (see below). 
+/// +/// # Field Masks in Projections +/// +/// When used in the context of a projection, a response message or +/// sub-message is filtered by the API to only contain those fields as +/// specified in the mask. For example, if the mask in the previous +/// example is applied to a response message as follows: +/// +/// ```text +/// f { +/// a : 22 +/// b { +/// d : 1 +/// x : 2 +/// } +/// y : 13 +/// } +/// z: 8 +/// ``` +/// +/// The result will not contain specific values for fields x,y and z +/// (their value will be set to the default, and omitted in proto text +/// output): +/// +/// +/// ```text +/// f { +/// a : 22 +/// b { +/// d : 1 +/// } +/// } +/// ``` +/// +/// A repeated field is not allowed except at the last position of a +/// paths string. +/// +/// If a FieldMask object is not present in a get operation, the +/// operation applies to all fields (as if a FieldMask of all fields +/// had been specified). +/// +/// Note that a field mask does not necessarily apply to the +/// top-level response message. In case of a REST get operation, the +/// field mask applies directly to the response, but in case of a REST +/// list operation, the mask instead applies to each individual message +/// in the returned resource list. In case of a REST custom method, +/// other definitions may be used. Where the mask applies will be +/// clearly documented together with its declaration in the API. In +/// any case, the effect on the returned resource/resources is required +/// behavior for APIs. +/// +/// # Field Masks in Update Operations +/// +/// A field mask in update operations specifies which fields of the +/// targeted resource are going to be updated. The API is required +/// to only change the values of the fields as specified in the mask +/// and leave the others untouched. If a resource is passed in to +/// describe the updated values, the API ignores the values of all +/// fields not covered by the mask. 
+/// +/// If a repeated field is specified for an update operation, new values will +/// be appended to the existing repeated field in the target resource. Note that +/// a repeated field is only allowed in the last position of a `paths` string. +/// +/// If a sub-message is specified in the last position of the field mask for an +/// update operation, then new value will be merged into the existing sub-message +/// in the target resource. +/// +/// For example, given the target message: +/// +/// ```text +/// f { +/// b { +/// d: 1 +/// x: 2 +/// } +/// c: [1] +/// } +/// ``` +/// +/// And an update message: +/// +/// ```text +/// f { +/// b { +/// d: 10 +/// } +/// c: [2] +/// } +/// ``` +/// +/// then if the field mask is: +/// +/// paths: ["f.b", "f.c"] +/// +/// then the result will be: +/// +/// ```text +/// f { +/// b { +/// d: 10 +/// x: 2 +/// } +/// c: [1, 2] +/// } +/// ``` +/// +/// An implementation may provide options to override this default behavior for +/// repeated and message fields. +/// +/// In order to reset a field's value to the default, the field must +/// be in the mask and set to the default value in the provided resource. +/// Hence, in order to reset all fields of a resource, provide a default +/// instance of the resource and set all fields in the mask, or do +/// not provide a mask as described below. +/// +/// If a field mask is not present on update, the operation applies to +/// all fields (as if a field mask of all fields has been specified). +/// Note that in the presence of schema evolution, this may mean that +/// fields the client does not know and has therefore not filled into +/// the request will be reset to their default. If this is unwanted +/// behavior, a specific service may require a client to always specify +/// a field mask, producing an error if not. +/// +/// As with get operations, the location of the resource which +/// describes the updated values in the request message depends on the +/// operation kind. 
In any case, the effect of the field mask is +/// required to be honored by the API. +/// +/// ## Considerations for HTTP REST +/// +/// The HTTP kind of an update operation which uses a field mask must +/// be set to PATCH instead of PUT in order to satisfy HTTP semantics +/// (PUT must only be used for full updates). +/// +/// # JSON Encoding of Field Masks +/// +/// In JSON, a field mask is encoded as a single string where paths are +/// separated by a comma. Fields name in each path are converted +/// to/from lower-camel naming conventions. +/// +/// As an example, consider the following message declarations: +/// +/// ```text +/// message Profile { +/// User user = 1; +/// Photo photo = 2; +/// } +/// message User { +/// string display_name = 1; +/// string address = 2; +/// } +/// ``` +/// +/// In proto a field mask for `Profile` may look as such: +/// +/// ```text +/// mask { +/// paths: "user.display_name" +/// paths: "photo" +/// } +/// ``` +/// +/// In JSON, the same mask is represented as below: +/// +/// ```text +/// { +/// mask: "user.displayName,photo" +/// } +/// ``` +/// +/// # Field Masks and Oneof Fields +/// +/// Field masks treat fields in oneofs just as regular fields. Consider the +/// following message: +/// +/// ```text +/// message SampleMessage { +/// oneof test_oneof { +/// string name = 4; +/// SubMessage sub_message = 9; +/// } +/// } +/// ``` +/// +/// The field mask can be: +/// +/// ```text +/// mask { +/// paths: "name" +/// } +/// ``` +/// +/// Or: +/// +/// ```text +/// mask { +/// paths: "sub_message" +/// } +/// ``` +/// +/// Note that oneof type names ("test_oneof" in this case) cannot be used in +/// paths. +/// +/// ## Field Mask Verification +/// +/// The implementation of any API method which has a FieldMask type field in the +/// request should verify the included field paths, and return an +/// `INVALID_ARGUMENT` error if any path is unmappable. 
#[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct FieldMask { + /// The set of field mask paths. + /// /// Field 1: `paths` pub paths: ::buffa::alloc::vec::Vec<::buffa::alloc::string::String>, #[doc(hidden)] @@ -105,8 +330,233 @@ impl ::buffa::ExtensionSet for FieldMask { &mut self.__buffa_unknown_fields } } +/// `FieldMask` represents a set of symbolic field paths, for example: +/// +/// ```text +/// paths: "f.a" +/// paths: "f.b.d" +/// ``` +/// +/// Here `f` represents a field in some root message, `a` and `b` +/// fields in the message found in `f`, and `d` a field found in the +/// message in `f.b`. +/// +/// Field masks are used to specify a subset of fields that should be +/// returned by a get operation or modified by an update operation. +/// Field masks also have a custom JSON encoding (see below). +/// +/// # Field Masks in Projections +/// +/// When used in the context of a projection, a response message or +/// sub-message is filtered by the API to only contain those fields as +/// specified in the mask. For example, if the mask in the previous +/// example is applied to a response message as follows: +/// +/// ```text +/// f { +/// a : 22 +/// b { +/// d : 1 +/// x : 2 +/// } +/// y : 13 +/// } +/// z: 8 +/// ``` +/// +/// The result will not contain specific values for fields x,y and z +/// (their value will be set to the default, and omitted in proto text +/// output): +/// +/// +/// ```text +/// f { +/// a : 22 +/// b { +/// d : 1 +/// } +/// } +/// ``` +/// +/// A repeated field is not allowed except at the last position of a +/// paths string. +/// +/// If a FieldMask object is not present in a get operation, the +/// operation applies to all fields (as if a FieldMask of all fields +/// had been specified). +/// +/// Note that a field mask does not necessarily apply to the +/// top-level response message. 
In case of a REST get operation, the +/// field mask applies directly to the response, but in case of a REST +/// list operation, the mask instead applies to each individual message +/// in the returned resource list. In case of a REST custom method, +/// other definitions may be used. Where the mask applies will be +/// clearly documented together with its declaration in the API. In +/// any case, the effect on the returned resource/resources is required +/// behavior for APIs. +/// +/// # Field Masks in Update Operations +/// +/// A field mask in update operations specifies which fields of the +/// targeted resource are going to be updated. The API is required +/// to only change the values of the fields as specified in the mask +/// and leave the others untouched. If a resource is passed in to +/// describe the updated values, the API ignores the values of all +/// fields not covered by the mask. +/// +/// If a repeated field is specified for an update operation, new values will +/// be appended to the existing repeated field in the target resource. Note that +/// a repeated field is only allowed in the last position of a `paths` string. +/// +/// If a sub-message is specified in the last position of the field mask for an +/// update operation, then new value will be merged into the existing sub-message +/// in the target resource. +/// +/// For example, given the target message: +/// +/// ```text +/// f { +/// b { +/// d: 1 +/// x: 2 +/// } +/// c: [1] +/// } +/// ``` +/// +/// And an update message: +/// +/// ```text +/// f { +/// b { +/// d: 10 +/// } +/// c: [2] +/// } +/// ``` +/// +/// then if the field mask is: +/// +/// paths: ["f.b", "f.c"] +/// +/// then the result will be: +/// +/// ```text +/// f { +/// b { +/// d: 10 +/// x: 2 +/// } +/// c: [1, 2] +/// } +/// ``` +/// +/// An implementation may provide options to override this default behavior for +/// repeated and message fields. 
+/// +/// In order to reset a field's value to the default, the field must +/// be in the mask and set to the default value in the provided resource. +/// Hence, in order to reset all fields of a resource, provide a default +/// instance of the resource and set all fields in the mask, or do +/// not provide a mask as described below. +/// +/// If a field mask is not present on update, the operation applies to +/// all fields (as if a field mask of all fields has been specified). +/// Note that in the presence of schema evolution, this may mean that +/// fields the client does not know and has therefore not filled into +/// the request will be reset to their default. If this is unwanted +/// behavior, a specific service may require a client to always specify +/// a field mask, producing an error if not. +/// +/// As with get operations, the location of the resource which +/// describes the updated values in the request message depends on the +/// operation kind. In any case, the effect of the field mask is +/// required to be honored by the API. +/// +/// ## Considerations for HTTP REST +/// +/// The HTTP kind of an update operation which uses a field mask must +/// be set to PATCH instead of PUT in order to satisfy HTTP semantics +/// (PUT must only be used for full updates). +/// +/// # JSON Encoding of Field Masks +/// +/// In JSON, a field mask is encoded as a single string where paths are +/// separated by a comma. Fields name in each path are converted +/// to/from lower-camel naming conventions. 
+/// +/// As an example, consider the following message declarations: +/// +/// ```text +/// message Profile { +/// User user = 1; +/// Photo photo = 2; +/// } +/// message User { +/// string display_name = 1; +/// string address = 2; +/// } +/// ``` +/// +/// In proto a field mask for `Profile` may look as such: +/// +/// ```text +/// mask { +/// paths: "user.display_name" +/// paths: "photo" +/// } +/// ``` +/// +/// In JSON, the same mask is represented as below: +/// +/// ```text +/// { +/// mask: "user.displayName,photo" +/// } +/// ``` +/// +/// # Field Masks and Oneof Fields +/// +/// Field masks treat fields in oneofs just as regular fields. Consider the +/// following message: +/// +/// ```text +/// message SampleMessage { +/// oneof test_oneof { +/// string name = 4; +/// SubMessage sub_message = 9; +/// } +/// } +/// ``` +/// +/// The field mask can be: +/// +/// ```text +/// mask { +/// paths: "name" +/// } +/// ``` +/// +/// Or: +/// +/// ```text +/// mask { +/// paths: "sub_message" +/// } +/// ``` +/// +/// Note that oneof type names ("test_oneof" in this case) cannot be used in +/// paths. +/// +/// ## Field Mask Verification +/// +/// The implementation of any API method which has a FieldMask type field in the +/// request should verify the included field paths, and return an +/// `INVALID_ARGUMENT` error if any path is unmappable. #[derive(Clone, Debug, Default)] pub struct FieldMaskView<'a> { + /// The set of field mask paths. + /// /// Field 1: `paths` pub paths: ::buffa::RepeatedView<'a, &'a str>, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, diff --git a/buffa-types/src/generated/google.protobuf.struct.rs b/buffa-types/src/generated/google.protobuf.struct.rs index 2be1368..aecf1d0 100644 --- a/buffa-types/src/generated/google.protobuf.struct.rs +++ b/buffa-types/src/generated/google.protobuf.struct.rs @@ -1,10 +1,15 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. 
// source: google/protobuf/struct.proto +/// `NullValue` is a singleton enumeration to represent the null value for the +/// `Value` type union. +/// +/// The JSON representation for `NullValue` is JSON `null`. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] #[repr(i32)] pub enum NullValue { + /// Null value. NULL_VALUE = 0i32, } impl ::core::default::Default for NullValue { @@ -34,9 +39,19 @@ impl ::buffa::Enumeration for NullValue { } } } +/// `Struct` represents a structured data value, consisting of fields +/// which map to dynamically typed values. In some languages, `Struct` +/// might be supported by a native representation. For example, in +/// scripting languages like JS a struct is represented as an +/// object. The details of that representation are described together +/// with the proto support for the language. +/// +/// The JSON representation for `Struct` is JSON object. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct Struct { + /// Unordered map of dynamically typed values. + /// /// Field 1: `fields` pub fields: ::buffa::__private::HashMap<::buffa::alloc::string::String, Value>, #[doc(hidden)] @@ -222,8 +237,18 @@ impl ::buffa::ExtensionSet for Struct { &mut self.__buffa_unknown_fields } } +/// `Struct` represents a structured data value, consisting of fields +/// which map to dynamically typed values. In some languages, `Struct` +/// might be supported by a native representation. For example, in +/// scripting languages like JS a struct is represented as an +/// object. The details of that representation are described together +/// with the proto support for the language. +/// +/// The JSON representation for `Struct` is JSON object. #[derive(Clone, Debug, Default)] pub struct StructView<'a> { + /// Unordered map of dynamically typed values. 
+ /// /// Field 1: `fields` (map) pub fields: ::buffa::MapView<'a, &'a str, ValueView<'a>>, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, @@ -370,6 +395,12 @@ unsafe impl ::buffa::DefaultViewInstance for StructView<'static> { unsafe impl<'a> ::buffa::HasDefaultViewInstance for StructView<'a> { type Static = StructView<'static>; } +/// `Value` represents a dynamically typed value which can be either +/// null, a number, a string, a boolean, a recursive struct value, or a +/// list of values. A producer of value is expected to set one of these +/// variants. Absence of any variant indicates an error. +/// +/// The JSON representation for `Value` is JSON value. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct Value { @@ -634,6 +665,12 @@ impl ::buffa::ExtensionSet for Value { &mut self.__buffa_unknown_fields } } +/// `Value` represents a dynamically typed value which can be either +/// null, a number, a string, a boolean, a recursive struct value, or a +/// list of values. A producer of value is expected to set one of these +/// variants. Absence of any variant indicates an error. +/// +/// The JSON representation for `Value` is JSON value. #[derive(Clone, Debug, Default)] pub struct ValueView<'a> { pub kind: ::core::option::Option>, @@ -855,6 +892,7 @@ unsafe impl<'a> ::buffa::HasDefaultViewInstance for ValueView<'a> { pub mod value { #[allow(unused_imports)] use super::*; + /// The kind of value. #[derive(Clone, PartialEq, Debug)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub enum Kind { @@ -896,9 +934,14 @@ pub mod value { ListValue(::buffa::alloc::boxed::Box>), } } +/// `ListValue` is a wrapper around a repeated field of values. +/// +/// The JSON representation for `ListValue` is JSON array. 
#[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct ListValue { + /// Repeated field of dynamically typed values. + /// /// Field 1: `values` pub values: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -1006,8 +1049,13 @@ impl ::buffa::ExtensionSet for ListValue { &mut self.__buffa_unknown_fields } } +/// `ListValue` is a wrapper around a repeated field of values. +/// +/// The JSON representation for `ListValue` is JSON array. #[derive(Clone, Debug, Default)] pub struct ListValueView<'a> { + /// Repeated field of dynamically typed values. + /// /// Field 1: `values` pub values: ::buffa::RepeatedView<'a, ValueView<'a>>, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, diff --git a/buffa-types/src/generated/google.protobuf.timestamp.rs b/buffa-types/src/generated/google.protobuf.timestamp.rs index ab55f3e..aa140d8 100644 --- a/buffa-types/src/generated/google.protobuf.timestamp.rs +++ b/buffa-types/src/generated/google.protobuf.timestamp.rs @@ -1,11 +1,121 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. // source: google/protobuf/timestamp.proto +/// A Timestamp represents a point in time independent of any time zone or local +/// calendar, encoded as a count of seconds and fractions of seconds at +/// nanosecond resolution. The count is relative to an epoch at UTC midnight on +/// January 1, 1970, in the proleptic Gregorian calendar which extends the +/// Gregorian calendar backwards to year one. +/// +/// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +/// second table is needed for interpretation, using a [24-hour linear +/// smear](https://developers.google.com/time/smear). +/// +/// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +/// restricting to that range, we ensure that we can convert to and from [RFC +/// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. 
+/// +/// # Examples +/// +/// Example 1: Compute Timestamp from POSIX `time()`. +/// +/// ```text +/// Timestamp timestamp; +/// timestamp.set_seconds(time(NULL)); +/// timestamp.set_nanos(0); +/// ``` +/// +/// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +/// +/// ```text +/// struct timeval tv; +/// gettimeofday(&tv, NULL); +/// +/// Timestamp timestamp; +/// timestamp.set_seconds(tv.tv_sec); +/// timestamp.set_nanos(tv.tv_usec * 1000); +/// ``` +/// +/// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +/// +/// ```text +/// FILETIME ft; +/// GetSystemTimeAsFileTime(&ft); +/// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +/// +/// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +/// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +/// Timestamp timestamp; +/// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +/// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +/// ``` +/// +/// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +/// +/// ```text +/// long millis = System.currentTimeMillis(); +/// +/// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +/// .setNanos((int) ((millis % 1000) * 1000000)).build(); +/// ``` +/// +/// Example 5: Compute Timestamp from Java `Instant.now()`. +/// +/// ```text +/// Instant now = Instant.now(); +/// +/// Timestamp timestamp = +/// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +/// .setNanos(now.getNano()).build(); +/// ``` +/// +/// Example 6: Compute Timestamp from current time in Python. +/// +/// ```text +/// timestamp = Timestamp() +/// timestamp.GetCurrentTime() +/// ``` +/// +/// # JSON Mapping +/// +/// In JSON format, the Timestamp type is encoded as a string in the +/// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +/// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +/// where {year} is always expressed using four digits while {month}, {day}, +/// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +/// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +/// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +/// is required. A proto3 JSON serializer should always use UTC (as indicated by +/// "Z") when printing the Timestamp type and a proto3 JSON parser should be +/// able to accept both UTC and other timezones (as indicated by an offset). +/// +/// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +/// 01:30 UTC on January 15, 2017. +/// +/// In JavaScript, one can convert a Date object to this format using the +/// standard +/// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +/// method. In Python, a standard `datetime.datetime` object can be converted +/// to this format using +/// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +/// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +/// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +/// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() +/// ) to obtain a formatter capable of generating timestamps in this format. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct Timestamp { + /// Represents seconds of UTC time since Unix epoch + /// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + /// 9999-12-31T23:59:59Z inclusive. + /// /// Field 1: `seconds` pub seconds: i64, + /// Non-negative fractions of a second at nanosecond resolution. Negative + /// second values with fractions must still have non-negative nanos values + /// that count forward in time. 
Must be from 0 to 999,999,999 + /// inclusive. + /// /// Field 2: `nanos` pub nanos: i32, #[doc(hidden)] @@ -126,10 +236,120 @@ impl ::buffa::ExtensionSet for Timestamp { &mut self.__buffa_unknown_fields } } +/// A Timestamp represents a point in time independent of any time zone or local +/// calendar, encoded as a count of seconds and fractions of seconds at +/// nanosecond resolution. The count is relative to an epoch at UTC midnight on +/// January 1, 1970, in the proleptic Gregorian calendar which extends the +/// Gregorian calendar backwards to year one. +/// +/// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +/// second table is needed for interpretation, using a [24-hour linear +/// smear](https://developers.google.com/time/smear). +/// +/// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +/// restricting to that range, we ensure that we can convert to and from [RFC +/// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +/// +/// # Examples +/// +/// Example 1: Compute Timestamp from POSIX `time()`. +/// +/// ```text +/// Timestamp timestamp; +/// timestamp.set_seconds(time(NULL)); +/// timestamp.set_nanos(0); +/// ``` +/// +/// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +/// +/// ```text +/// struct timeval tv; +/// gettimeofday(&tv, NULL); +/// +/// Timestamp timestamp; +/// timestamp.set_seconds(tv.tv_sec); +/// timestamp.set_nanos(tv.tv_usec * 1000); +/// ``` +/// +/// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +/// +/// ```text +/// FILETIME ft; +/// GetSystemTimeAsFileTime(&ft); +/// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +/// +/// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +/// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+/// Timestamp timestamp; +/// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +/// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +/// ``` +/// +/// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +/// +/// ```text +/// long millis = System.currentTimeMillis(); +/// +/// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +/// .setNanos((int) ((millis % 1000) * 1000000)).build(); +/// ``` +/// +/// Example 5: Compute Timestamp from Java `Instant.now()`. +/// +/// ```text +/// Instant now = Instant.now(); +/// +/// Timestamp timestamp = +/// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +/// .setNanos(now.getNano()).build(); +/// ``` +/// +/// Example 6: Compute Timestamp from current time in Python. +/// +/// ```text +/// timestamp = Timestamp() +/// timestamp.GetCurrentTime() +/// ``` +/// +/// # JSON Mapping +/// +/// In JSON format, the Timestamp type is encoded as a string in the +/// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +/// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +/// where {year} is always expressed using four digits while {month}, {day}, +/// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +/// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +/// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +/// is required. A proto3 JSON serializer should always use UTC (as indicated by +/// "Z") when printing the Timestamp type and a proto3 JSON parser should be +/// able to accept both UTC and other timezones (as indicated by an offset). +/// +/// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +/// 01:30 UTC on January 15, 2017. 
+/// +/// In JavaScript, one can convert a Date object to this format using the +/// standard +/// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +/// method. In Python, a standard `datetime.datetime` object can be converted +/// to this format using +/// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +/// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +/// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +/// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() +/// ) to obtain a formatter capable of generating timestamps in this format. #[derive(Clone, Debug, Default)] pub struct TimestampView<'a> { + /// Represents seconds of UTC time since Unix epoch + /// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + /// 9999-12-31T23:59:59Z inclusive. + /// /// Field 1: `seconds` pub seconds: i64, + /// Non-negative fractions of a second at nanosecond resolution. Negative + /// second values with fractions must still have non-negative nanos values + /// that count forward in time. Must be from 0 to 999,999,999 + /// inclusive. + /// /// Field 2: `nanos` pub nanos: i32, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, diff --git a/buffa-types/src/generated/google.protobuf.wrappers.rs b/buffa-types/src/generated/google.protobuf.wrappers.rs index 8f9b901..7d581fc 100644 --- a/buffa-types/src/generated/google.protobuf.wrappers.rs +++ b/buffa-types/src/generated/google.protobuf.wrappers.rs @@ -1,9 +1,14 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. // source: google/protobuf/wrappers.proto +/// Wrapper message for `double`. +/// +/// The JSON representation for `DoubleValue` is JSON number. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct DoubleValue { + /// The double value. 
+ /// /// Field 1: `value` pub value: f64, #[doc(hidden)] @@ -102,8 +107,13 @@ impl ::buffa::ExtensionSet for DoubleValue { &mut self.__buffa_unknown_fields } } +/// Wrapper message for `double`. +/// +/// The JSON representation for `DoubleValue` is JSON number. #[derive(Clone, Debug, Default)] pub struct DoubleValueView<'a> { + /// The double value. + /// /// Field 1: `value` pub value: f64, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, @@ -202,9 +212,14 @@ unsafe impl ::buffa::DefaultViewInstance for DoubleValueView<'static> { unsafe impl<'a> ::buffa::HasDefaultViewInstance for DoubleValueView<'a> { type Static = DoubleValueView<'static>; } +/// Wrapper message for `float`. +/// +/// The JSON representation for `FloatValue` is JSON number. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct FloatValue { + /// The float value. + /// /// Field 1: `value` pub value: f32, #[doc(hidden)] @@ -303,8 +318,13 @@ impl ::buffa::ExtensionSet for FloatValue { &mut self.__buffa_unknown_fields } } +/// Wrapper message for `float`. +/// +/// The JSON representation for `FloatValue` is JSON number. #[derive(Clone, Debug, Default)] pub struct FloatValueView<'a> { + /// The float value. + /// /// Field 1: `value` pub value: f32, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, @@ -403,9 +423,14 @@ unsafe impl ::buffa::DefaultViewInstance for FloatValueView<'static> { unsafe impl<'a> ::buffa::HasDefaultViewInstance for FloatValueView<'a> { type Static = FloatValueView<'static>; } +/// Wrapper message for `int64`. +/// +/// The JSON representation for `Int64Value` is JSON string. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct Int64Value { + /// The int64 value. 
+ /// /// Field 1: `value` pub value: i64, #[doc(hidden)] @@ -504,8 +529,13 @@ impl ::buffa::ExtensionSet for Int64Value { &mut self.__buffa_unknown_fields } } +/// Wrapper message for `int64`. +/// +/// The JSON representation for `Int64Value` is JSON string. #[derive(Clone, Debug, Default)] pub struct Int64ValueView<'a> { + /// The int64 value. + /// /// Field 1: `value` pub value: i64, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, @@ -604,9 +634,14 @@ unsafe impl ::buffa::DefaultViewInstance for Int64ValueView<'static> { unsafe impl<'a> ::buffa::HasDefaultViewInstance for Int64ValueView<'a> { type Static = Int64ValueView<'static>; } +/// Wrapper message for `uint64`. +/// +/// The JSON representation for `UInt64Value` is JSON string. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct UInt64Value { + /// The uint64 value. + /// /// Field 1: `value` pub value: u64, #[doc(hidden)] @@ -705,8 +740,13 @@ impl ::buffa::ExtensionSet for UInt64Value { &mut self.__buffa_unknown_fields } } +/// Wrapper message for `uint64`. +/// +/// The JSON representation for `UInt64Value` is JSON string. #[derive(Clone, Debug, Default)] pub struct UInt64ValueView<'a> { + /// The uint64 value. + /// /// Field 1: `value` pub value: u64, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, @@ -805,9 +845,14 @@ unsafe impl ::buffa::DefaultViewInstance for UInt64ValueView<'static> { unsafe impl<'a> ::buffa::HasDefaultViewInstance for UInt64ValueView<'a> { type Static = UInt64ValueView<'static>; } +/// Wrapper message for `int32`. +/// +/// The JSON representation for `Int32Value` is JSON number. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct Int32Value { + /// The int32 value. 
+ /// /// Field 1: `value` pub value: i32, #[doc(hidden)] @@ -906,8 +951,13 @@ impl ::buffa::ExtensionSet for Int32Value { &mut self.__buffa_unknown_fields } } +/// Wrapper message for `int32`. +/// +/// The JSON representation for `Int32Value` is JSON number. #[derive(Clone, Debug, Default)] pub struct Int32ValueView<'a> { + /// The int32 value. + /// /// Field 1: `value` pub value: i32, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, @@ -1006,9 +1056,14 @@ unsafe impl ::buffa::DefaultViewInstance for Int32ValueView<'static> { unsafe impl<'a> ::buffa::HasDefaultViewInstance for Int32ValueView<'a> { type Static = Int32ValueView<'static>; } +/// Wrapper message for `uint32`. +/// +/// The JSON representation for `UInt32Value` is JSON number. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct UInt32Value { + /// The uint32 value. + /// /// Field 1: `value` pub value: u32, #[doc(hidden)] @@ -1107,8 +1162,13 @@ impl ::buffa::ExtensionSet for UInt32Value { &mut self.__buffa_unknown_fields } } +/// Wrapper message for `uint32`. +/// +/// The JSON representation for `UInt32Value` is JSON number. #[derive(Clone, Debug, Default)] pub struct UInt32ValueView<'a> { + /// The uint32 value. + /// /// Field 1: `value` pub value: u32, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, @@ -1207,9 +1267,14 @@ unsafe impl ::buffa::DefaultViewInstance for UInt32ValueView<'static> { unsafe impl<'a> ::buffa::HasDefaultViewInstance for UInt32ValueView<'a> { type Static = UInt32ValueView<'static>; } +/// Wrapper message for `bool`. +/// +/// The JSON representation for `BoolValue` is JSON `true` and `false`. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct BoolValue { + /// The bool value. 
+ /// /// Field 1: `value` pub value: bool, #[doc(hidden)] @@ -1308,8 +1373,13 @@ impl ::buffa::ExtensionSet for BoolValue { &mut self.__buffa_unknown_fields } } +/// Wrapper message for `bool`. +/// +/// The JSON representation for `BoolValue` is JSON `true` and `false`. #[derive(Clone, Debug, Default)] pub struct BoolValueView<'a> { + /// The bool value. + /// /// Field 1: `value` pub value: bool, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, @@ -1408,9 +1478,14 @@ unsafe impl ::buffa::DefaultViewInstance for BoolValueView<'static> { unsafe impl<'a> ::buffa::HasDefaultViewInstance for BoolValueView<'a> { type Static = BoolValueView<'static>; } +/// Wrapper message for `string`. +/// +/// The JSON representation for `StringValue` is JSON string. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct StringValue { + /// The string value. + /// /// Field 1: `value` pub value: ::buffa::alloc::string::String, #[doc(hidden)] @@ -1512,8 +1587,13 @@ impl ::buffa::ExtensionSet for StringValue { &mut self.__buffa_unknown_fields } } +/// Wrapper message for `string`. +/// +/// The JSON representation for `StringValue` is JSON string. #[derive(Clone, Debug, Default)] pub struct StringValueView<'a> { + /// The string value. + /// /// Field 1: `value` pub value: &'a str, pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, @@ -1612,9 +1692,14 @@ unsafe impl ::buffa::DefaultViewInstance for StringValueView<'static> { unsafe impl<'a> ::buffa::HasDefaultViewInstance for StringValueView<'a> { type Static = StringValueView<'static>; } +/// Wrapper message for `bytes`. +/// +/// The JSON representation for `BytesValue` is JSON string. #[derive(Clone, PartialEq, Default)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] pub struct BytesValue { + /// The bytes value. 
+ /// /// Field 1: `value` pub value: ::buffa::alloc::vec::Vec, #[doc(hidden)] @@ -1716,8 +1801,13 @@ impl ::buffa::ExtensionSet for BytesValue { &mut self.__buffa_unknown_fields } } +/// Wrapper message for `bytes`. +/// +/// The JSON representation for `BytesValue` is JSON string. #[derive(Clone, Debug, Default)] pub struct BytesValueView<'a> { + /// The bytes value. + /// /// Field 1: `value` pub value: &'a [u8], pub __buffa_unknown_fields: ::buffa::UnknownFieldsView<'a>, diff --git a/examples/logging/src/gen/context.v1.context.rs b/examples/logging/src/gen/context.v1.context.rs index 8af77e7..15e3db0 100644 --- a/examples/logging/src/gen/context.v1.context.rs +++ b/examples/logging/src/gen/context.v1.context.rs @@ -1,16 +1,27 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. // source: context/v1/context.proto +/// Contextual information about the request that produced a log entry. #[derive(Clone, PartialEq, Default)] pub struct RequestContext { + /// A unique identifier for the request (e.g. trace ID). + /// /// Field 1: `request_id` pub request_id: ::buffa::alloc::string::String, + /// The authenticated user, if any. + /// /// Field 2: `user_id` pub user_id: ::buffa::alloc::string::String, + /// The HTTP method (GET, POST, etc.). + /// /// Field 3: `method` pub method: ::buffa::alloc::string::String, + /// The request path. + /// /// Field 4: `path` pub path: ::buffa::alloc::string::String, + /// Arbitrary key-value metadata. + /// /// Field 5: `metadata` pub metadata: ::buffa::__private::HashMap< ::buffa::alloc::string::String, diff --git a/examples/logging/src/gen/log.v1.log.rs b/examples/logging/src/gen/log.v1.log.rs index 8dc3eb8..70caaab 100644 --- a/examples/logging/src/gen/log.v1.log.rs +++ b/examples/logging/src/gen/log.v1.log.rs @@ -1,6 +1,7 @@ // @generated by protoc-gen-buffa. DO NOT EDIT. // source: log/v1/log.proto +/// Severity level for a log entry. 
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(i32)] pub enum Severity { @@ -55,18 +56,31 @@ impl ::buffa::Enumeration for Severity { } } } +/// A single structured log entry. #[derive(Clone, PartialEq, Default)] pub struct LogEntry { + /// When the log entry was created. + /// /// Field 1: `timestamp` pub timestamp: ::buffa::MessageField<::buffa_types::google::protobuf::Timestamp>, + /// Severity level. + /// /// Field 2: `severity` pub severity: ::buffa::EnumValue, + /// The log message. + /// /// Field 3: `message` pub message: ::buffa::alloc::string::String, + /// The logger name (e.g. module path). + /// /// Field 4: `logger` pub logger: ::buffa::alloc::string::String, + /// Request context, if this log was produced during request handling. + /// /// Field 5: `context` pub context: ::buffa::MessageField, + /// Structured key-value fields for machine-readable data. + /// /// Field 6: `fields` pub fields: ::buffa::__private::HashMap< ::buffa::alloc::string::String, @@ -380,6 +394,7 @@ impl ::buffa::ExtensionSet for LogEntry { &mut self.__buffa_unknown_fields } } +/// A batch of log entries, for length-delimited streaming I/O. #[derive(Clone, PartialEq, Default)] pub struct LogBatch { /// Field 1: `entries` diff --git a/scripts/gen-bootstrap-types.sh b/scripts/gen-bootstrap-types.sh index 39c0ca4..d9fdb04 100755 --- a/scripts/gen-bootstrap-types.sh +++ b/scripts/gen-bootstrap-types.sh @@ -36,7 +36,7 @@ echo "protoc: $PROTOC ($ver_str)" ROOT="$(cd "$(dirname "$0")/.." && pwd)" DESC=/tmp/buffa-descriptor-set.pb -"$PROTOC" --descriptor_set_out="$DESC" --include_imports \ +"$PROTOC" --descriptor_set_out="$DESC" --include_imports --include_source_info \ -I "$ROOT/buffa-codegen/protos" \ google/protobuf/descriptor.proto \ google/protobuf/compiler/plugin.proto