2 changes: 1 addition & 1 deletion examples/json5-trailing-comma-formatter/src/main.rs
@@ -2,7 +2,7 @@ use json_five::rt::parser::{from_str, ArrayValueContext, JSONValue, KeyValuePair

fn format_value(val: &mut JSONValue) {
    match val {
        JSONValue::JSONObject { key_value_pairs, context } => {
        JSONValue::JSONObject { key_value_pairs, .. } => {
            let length = key_value_pairs.len();
            for (idx, kvp) in key_value_pairs.iter_mut().enumerate() {
                match kvp.value {
18 changes: 17 additions & 1 deletion src/de.rs
@@ -1,6 +1,6 @@
use serde::de::{self, DeserializeSeed, Deserialize, Deserializer, MapAccess, SeqAccess, VariantAccess, Visitor};
use std::fmt;
use crate::parser::{JSONValue, JSONKeyValuePair, UnaryOperator, from_str as model_from_str};
use crate::parser::{JSONValue, JSONKeyValuePair, UnaryOperator, from_str as model_from_str, from_bytes as model_from_bytes};
use crate::utils::unescape;
#[derive(Debug)]
pub enum SerdeJSON5Error {
@@ -579,6 +579,22 @@ where
    T::deserialize(deserializer)
}

pub fn from_bytes<'de, T>(s: &'de [u8]) -> Result<T, SerdeJSON5Error>
where
    T: Deserialize<'de>,
{
    // 1) Parse the bytes into your JSONText
    let parsed = model_from_bytes(s).map_err(|err| SerdeJSON5Error::Custom(err.to_string()))?;

    // 2) Wrap the JSONValue in our deserializer
    let deserializer = JSONValueDeserializer {
        input: &parsed.value,
    };

    // 3) Deserialize into the caller’s type T
    T::deserialize(deserializer)
}

#[cfg(test)]
mod test {
    use std::collections::HashMap;
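For reference, a minimal sketch of how the new serde-level `from_bytes` might be called (hypothetical, not part of this changeset): it assumes the crate is built with the `serde` feature, and the `Server` struct and its fields are purely illustrative.

```rust
use serde::Deserialize;

// Hypothetical target type, for illustration only.
#[derive(Debug, Deserialize, PartialEq)]
struct Server {
    host: String,
    port: u16,
}

fn main() {
    // JSON5 input as raw bytes: unquoted keys, single quotes, and a comment.
    let raw: &[u8] = b"{ host: 'localhost', port: 8080 /* default */ }";
    let server: Server = json_five::de::from_bytes(raw).expect("valid JSON5");
    assert_eq!(server, Server { host: "localhost".to_string(), port: 8080 });
}
```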
21 changes: 7 additions & 14 deletions src/lib.rs
@@ -1,3 +1,5 @@
#![doc = include_str!("../README.md")]

/// The default performance-focused tokenizer
pub mod tokenize;

@@ -16,35 +18,26 @@ pub mod de;
#[cfg(feature = "serde")]
pub mod ser;

/// The `serde` deserializer
#[cfg(feature = "serde")]
pub use de::{from_str, JSONValueDeserializer};
pub use de::{from_str, from_bytes, JSONValueDeserializer};

/// the `serde` serializer
#[cfg(feature = "serde")]
pub use ser::{to_string, to_string_formatted, Serializer};

/// turn your strings into abstract JSON5 model (AST)
pub use parser::from_str as model_from_str;

/// Turn [crate::tokenize::Tokens] into AST
pub use parser::{from_str as model_from_str, from_bytes as model_from_bytes};

pub use parser::from_tokens as model_from_tokens;

/// formatting configuration for use with [crate::to_string_formatted]
pub use parser::{FormatConfiguration, TrailingComma};

/// turn str/bytes into [crate::tokenize::Tokens]
pub use tokenize::{tokenize_bytes, tokenize_str};
pub use tokenize::{tokenize_bytes, tokenize_str, tokenize_rt_str, tokenize_rt_bytes};

/// round-trip source (str) into [crate::rt::tokenize::Token]s
pub use rt::tokenize::{tokens_to_source, source_to_tokens};


/// The round-tripping module
pub mod rt {
    pub mod parser;
    pub mod tokenize;
}

#[doc = include_str!("../README.md")]
#[cfg(doctest)]
pub struct ReadmeDoctests;
23 changes: 23 additions & 0 deletions src/parser.rs
@@ -580,11 +580,34 @@ pub fn from_str<'input>(source: &'input str) -> Result<JSONText<'input>, Parsing
    }
}

/// Like [from_str] but for bytes
pub fn from_bytes<'input>(source: &'input [u8]) -> Result<JSONText<'input>, ParsingError> {
    use crate::tokenize::tokenize_bytes;
    let maybe_toks = tokenize_bytes(source);
    match maybe_toks {
        Err(e) => {
            Err(ParsingError{index: e.index, message: e.message, char_index: e.char_index, lineno: e.lineno, colno: e.colno})
        }
        Ok(toks) => {
            from_tokens(&toks)
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::tokenize::Tokenizer;
    use crate::parser::JSONValue::*;
    use super::*;

    #[test]
    fn test_from_bytes() {
        let res = from_bytes(b"{}").unwrap();
        let expected = JSONText{value: JSONValue::JSONObject {key_value_pairs: vec![]}};
        assert_eq!(res, expected)
    }

    #[test]
    fn test_foo() {
        let res = from_str("{}").unwrap();
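Similarly, a small usage sketch for the model-level `from_bytes` (again hypothetical, not from this changeset), using the `model_from_bytes` re-export introduced in the lib.rs hunk above:

```rust
use json_five::model_from_bytes;

fn main() {
    // Parse raw bytes straight into the JSON5 AST, mirroring model_from_str.
    let doc = model_from_bytes(b"{ answer: 42, }").expect("parses");
    // JSONText exposes the parsed tree in its `value` field (as used by the
    // deserializer and the new test) and implements Debug (the test's
    // assert_eq! relies on it), so the tree can be printed directly.
    println!("{:?}", doc.value);
}
```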
5 changes: 4 additions & 1 deletion src/tokenize.rs
@@ -640,16 +640,18 @@ impl<'input> Iterator for Tokenizer<'input> {
/// Turn str into [Tokens].
///
/// Usually not used directly.
/// Token spans will not include whitespace and comment tokens
pub fn tokenize_str(text: &'_ str) -> Result<Tokens<'_>, TokenizationError> {
    Tokenizer::new(text).tokenize()
}

/// Like [tokenize_str] but includes whitespace and comment tokens
pub fn tokenize_rt_str(text: &'_ str) -> Result<Tokens<'_>, TokenizationError> {
    let config = TokenizerConfig{include_comments: true, include_whitespace: true, allow_octal: false};
    Tokenizer::with_configuration(text, config).tokenize()
}

/// Tokenize bytes to [Tokens]
/// Like [tokenize_str] but for bytes
pub fn tokenize_bytes(bytes: &'_ [u8]) -> Result<Tokens<'_>, TokenizationError> {
    let maybe_text = std::str::from_utf8(bytes);
    match maybe_text {
@@ -669,6 +671,7 @@ pub fn tokenize_bytes(bytes: &'_ [u8]) -> Result<Tokens<'_>, TokenizationError>
    }
}

/// Like [tokenize_rt_str] but for bytes
pub fn tokenize_rt_bytes(bytes: &'_ [u8]) -> Result<Tokens<'_>, TokenizationError> {
    let maybe_text = std::str::from_utf8(bytes);
    match maybe_text {
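To round this out, a brief sketch contrasting the two string tokenizers whose doc comments changed in this hunk (hypothetical, not part of the changeset); only the `Result`s are inspected, since the internals of `Tokens` are not shown in the diff:

```rust
use json_five::{tokenize_str, tokenize_rt_str};

fn main() {
    let src = "{ a: 1, /* keep this comment */ }";
    // Per the updated doc comments: tokenize_str drops whitespace and comment
    // tokens, while tokenize_rt_str keeps them for round-tripping.
    let plain = tokenize_str(src).expect("tokenizes");
    let round_trip = tokenize_rt_str(src).expect("tokenizes");
    let _ = (plain, round_trip);
}
```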