Commit d3e8073

trivial typo fix

Signed-off-by: Ruihang Xia <[email protected]>

1 parent: 51718dc

File tree: 19 files changed, +38 -35 lines changed

benchmarks/src/bin/external_aggr.rs

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@ impl ExternalAggrConfig {
     "#,
     ];
 
-    /// If `--query` and `--memory-limit` is not speicified, run all queries
+    /// If `--query` and `--memory-limit` is not specified, run all queries
     /// with pre-configured memory limits
     /// If only `--query` is specified, run the query with all memory limits
     /// for this query
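
The doc comment above encodes a small dispatch over two optional CLI flags. As a hedged illustration only, here is how that dispatch could look in plain Rust; plan_runs, the query numbers, and the preconfigured limits are all made up for this sketch and are not the benchmark's actual code:

    /// Hypothetical sketch of the flag dispatch the doc comment describes.
    /// Returns the (query, memory_limit) pairs the benchmark would run.
    fn plan_runs(query: Option<u32>, memory_limit: Option<u64>) -> Vec<(u32, u64)> {
        // Made-up preconfigured limits, keyed by query number.
        let preconfigured: &[(u32, &[u64])] = &[(1, &[1 << 30, 2 << 30]), (2, &[4 << 30])];
        match (query, memory_limit) {
            // Neither flag: all queries, each with its preconfigured limits.
            (None, None) => preconfigured
                .iter()
                .flat_map(|(q, limits)| limits.iter().map(move |l| (*q, *l)))
                .collect(),
            // Only `--query`: that query with all of its preconfigured limits.
            (Some(q), None) => preconfigured
                .iter()
                .filter(|(pq, _)| *pq == q)
                .flat_map(|(q, limits)| limits.iter().map(move |l| (*q, *l)))
                .collect(),
            // Both flags: exactly one run (the case the comment leaves implicit).
            (Some(q), Some(l)) => vec![(q, l)],
            // Only `--memory-limit`: every query at that limit (assumption).
            (None, Some(l)) => preconfigured.iter().map(|(q, _)| (*q, l)).collect(),
        }
    }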

datafusion-cli/tests/cli_integration.rs

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ async fn setup_minio_container() -> ContainerAsync<minio::MinIO> {
 
     match container {
         Ok(container) => {
-            // We wait for MinIO to be healthy and preprare test files. We do it via CLI to avoid s3 dependency
+            // We wait for MinIO to be healthy and prepare test files. We do it via CLI to avoid s3 dependency
             let commands = [
                 ExecCommand::new(["/usr/bin/mc", "ready", "local"]),
                 ExecCommand::new([

datafusion-examples/examples/custom_file_casts.rs

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ use object_store::path::Path;
 use object_store::{ObjectStore, PutPayload};
 
 // Example showing how to implement custom casting rules to adapt file schemas.
-// This example enforces that casts must be stricly widening: if the file type is Int64 and the table type is Int32, it will error
+// This example enforces that casts must be strictly widening: if the file type is Int64 and the table type is Int32, it will error
 // before even reading the data.
 // Without this custom cast rule DataFusion would happily do the narrowing cast, potentially erroring only if it found a row with data it could not cast.
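
For a concrete feel of the "strictly widening" rule the example enforces, a minimal sketch over Arrow's integer types follows; is_widening is a hypothetical helper for illustration, not the function the example actually defines:

    use arrow::datatypes::DataType;

    // A cast is "widening" here iff every value of the file type fits in the
    // table type; same-type is trivially allowed, narrowing is rejected.
    fn is_widening(file: &DataType, table: &DataType) -> bool {
        use DataType::*;
        file == table
            || matches!(
                (file, table),
                (Int8, Int16 | Int32 | Int64) | (Int16, Int32 | Int64) | (Int32, Int64)
            )
    }

    fn main() {
        assert!(is_widening(&DataType::Int32, &DataType::Int64)); // widening: ok
        assert!(!is_widening(&DataType::Int64, &DataType::Int32)); // narrowing: error out
    }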

datafusion/core/src/datasource/listing/table.rs

Lines changed: 4 additions & 4 deletions
@@ -215,16 +215,16 @@ impl ListingTableConfig {
     ) -> Result<(String, Option<String>)> {
         let mut exts = path.rsplit('.');
 
-        let splitted = exts.next().unwrap_or("");
+        let split = exts.next().unwrap_or("");
 
-        let file_compression_type = FileCompressionType::from_str(splitted)
+        let file_compression_type = FileCompressionType::from_str(split)
             .unwrap_or(FileCompressionType::UNCOMPRESSED);
 
         if file_compression_type.is_compressed() {
             let splitted2 = exts.next().unwrap_or("");
-            Ok((splitted2.to_string(), Some(splitted.to_string())))
+            Ok((splitted2.to_string(), Some(split.to_string())))
         } else {
-            Ok((splitted.to_string(), None))
+            Ok((split.to_string(), None))
         }
     }
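
The renamed variable drives the listing table's extension parsing: rsplit('.') walks segments right to left, so the first segment is either the real extension or a compression suffix. A standalone sketch of that behavior, with a made-up KNOWN_COMPRESSIONS list standing in for FileCompressionType:

    // Hypothetical stand-in for FileCompressionType::from_str + is_compressed.
    const KNOWN_COMPRESSIONS: [&str; 4] = ["gz", "bz2", "xz", "zst"];

    // Returns (file_extension, optional_compression_extension).
    fn parse_extension(path: &str) -> (String, Option<String>) {
        let mut exts = path.rsplit('.');
        // Rightmost dot-separated segment, e.g. "gz" for "data.csv.gz".
        let split = exts.next().unwrap_or("");
        if KNOWN_COMPRESSIONS.contains(&split) {
            // Compressed: the real extension is one segment further left.
            let inner = exts.next().unwrap_or("");
            (inner.to_string(), Some(split.to_string()))
        } else {
            (split.to_string(), None)
        }
    }

    fn main() {
        assert_eq!(parse_extension("data.csv.gz"), ("csv".into(), Some("gz".into())));
        assert_eq!(parse_extension("data.parquet"), ("parquet".into(), None));
    }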

datafusion/core/tests/memory_limit/memory_limit_validation/sort_mem_validation.rs

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ static INIT: Once = Once::new();
 
 // ===========================================================================
 // Test runners:
-// Runners are splitted into multiple tests to run in parallel
+// Runners are split into multiple tests to run in parallel
 // ===========================================================================
 
 #[test]

datafusion/core/tests/memory_limit/mod.rs

Lines changed: 1 addition & 1 deletion
@@ -546,7 +546,7 @@ async fn test_external_sort_zero_merge_reservation() {
 // Tests for disk limit (`max_temp_directory_size` in `DiskManager`)
 // ------------------------------------------------------------------
 
-// Create a new `SessionContext` with speicified disk limit, memory pool limit, and spill compression codec
+// Create a new `SessionContext` with specified disk limit, memory pool limit, and spill compression codec
 async fn setup_context(
     disk_limit: u64,
     memory_pool_limit: usize,

datafusion/core/tests/physical_optimizer/enforce_distribution.rs

Lines changed: 2 additions & 2 deletions
@@ -2610,7 +2610,7 @@ fn parallelization_two_partitions_into_four() -> Result<()> {
         "AggregateExec: mode=FinalPartitioned, gby=[a@0 as a], aggr=[]",
         " RepartitionExec: partitioning=Hash([a@0], 4), input_partitions=4",
         " AggregateExec: mode=Partial, gby=[a@0 as a], aggr=[]",
-        // Multiple source files splitted across partitions
+        // Multiple source files split across partitions
         " DataSourceExec: file_groups={4 groups: [[x:0..50], [x:50..100], [y:0..50], [y:50..100]]}, projection=[a, b, c, d, e], file_type=parquet",
     ];
     test_config.run(
@@ -2625,7 +2625,7 @@
         "AggregateExec: mode=FinalPartitioned, gby=[a@0 as a], aggr=[]",
         " RepartitionExec: partitioning=Hash([a@0], 4), input_partitions=4",
         " AggregateExec: mode=Partial, gby=[a@0 as a], aggr=[]",
-        // Multiple source files splitted across partitions
+        // Multiple source files split across partitions
        " DataSourceExec: file_groups={4 groups: [[x:0..50], [x:50..100], [y:0..50], [y:50..100]]}, projection=[a, b, c, d, e], file_type=csv, has_header=false",
     ];
     test_config.run(&expected_csv, plan_csv.clone(), &DISTRIB_DISTRIB_SORT)?;

datafusion/datasource-parquet/src/opener.rs

Lines changed: 1 addition & 1 deletion
@@ -186,7 +186,7 @@ impl FileOpener for ParquetOpener {
 
         // Don't load the page index yet. Since it is not stored inline in
         // the footer, loading the page index if it is not needed will do
-        // unecessary I/O. We decide later if it is needed to evaluate the
+        // unnecessary I/O. We decide later if it is needed to evaluate the
         // pruning predicates. Thus default to not requesting if from the
         // underlying reader.
         let mut options = ArrowReaderOptions::new().with_page_index(false);
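
The comment describes a lazy-loading decision: the parquet page index lives outside the footer, so fetching it up front is wasted I/O whenever pruning never needs it. A minimal sketch of the same option on the parquet crate's synchronous reader (DataFusion's opener itself goes through the async path; load_metadata here is a hypothetical helper):

    use std::fs::File;

    use parquet::arrow::arrow_reader::{ArrowReaderMetadata, ArrowReaderOptions};

    // Load parquet metadata, fetching the page index only on request;
    // when `need_page_index` is false the reader skips that extra read.
    fn load_metadata(
        file: &File,
        need_page_index: bool,
    ) -> parquet::errors::Result<ArrowReaderMetadata> {
        let options = ArrowReaderOptions::new().with_page_index(need_page_index);
        ArrowReaderMetadata::load(file, options)
    }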

datafusion/expr/src/logical_plan/builder.rs

Lines changed: 1 addition & 1 deletion
@@ -1520,7 +1520,7 @@ impl ValuesFields {
 // `name_map` tracks a mapping between a field name and the number of appearances of that field.
 //
 // Some field names might already come to this function with the count (number of times it appeared)
-// as a sufix e.g. id:1, so there's still a chance of name collisions, for example,
+// as a suffix e.g. id:1, so there's still a chance of name collisions, for example,
 // if these three fields passed to this function: "col:1", "col" and "col", the function
 // would rename them to -> col:1, col, col:1 causing a posteriror error when building the DFSchema.
 // that's why we need the `seen` set, so the fields are always unique.
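
The collision the comment describes is easy to reproduce. A standalone sketch of the counter-plus-seen-set scheme (dedup_names is a hypothetical illustration, not the ValuesFields code):

    use std::collections::{HashMap, HashSet};

    // Suffix-based field-name deduplication: keep a count per base name,
    // but also a `seen` set, because an incoming name may already carry a
    // ":N" suffix and collide with a generated one.
    fn dedup_names(names: Vec<String>) -> Vec<String> {
        let mut name_map: HashMap<String, usize> = HashMap::new();
        let mut seen: HashSet<String> = HashSet::new();
        let mut out = Vec::with_capacity(names.len());
        for name in names {
            let mut candidate = name.clone();
            while seen.contains(&candidate) {
                let count = name_map.entry(name.clone()).or_insert(0);
                *count += 1;
                candidate = format!("{name}:{count}");
            }
            seen.insert(candidate.clone());
            out.push(candidate);
        }
        out
    }

    fn main() {
        // "col:1", "col", "col" -> without `seen`, the second "col" would be
        // renamed to "col:1" and collide; with it, we get "col:2".
        assert_eq!(
            dedup_names(vec!["col:1".into(), "col".into(), "col".into()]),
            vec!["col:1", "col", "col:2"]
        );
    }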

datafusion/expr/src/logical_plan/invariants.rs

Lines changed: 1 addition & 1 deletion
@@ -102,7 +102,7 @@ fn assert_unique_field_names(plan: &LogicalPlan) -> Result<()> {
     plan.schema().check_names()
 }
 
-/// Returns an error if the plan is not sematically valid.
+/// Returns an error if the plan is not semantically valid.
 fn assert_valid_semantic_plan(plan: &LogicalPlan) -> Result<()> {
     assert_subqueries_are_valid(plan)?;
