Commit ce91967
Commit message: fmt
Parent: fc11163

2 files changed (+12, -16 lines)


native/core/src/execution/planner.rs

Lines changed: 1 addition & 4 deletions
@@ -104,7 +104,6 @@ use datafusion_common::{
     tree_node::{Transformed, TransformedResult, TreeNode, TreeNodeRecursion, TreeNodeRewriter},
     JoinType as DFJoinType, ScalarValue,
 };
-use datafusion_execution::object_store::ObjectStoreUrl;
 use datafusion_expr::type_coercion::other::get_coerce_type_for_case_expression;
 use datafusion_expr::{
     AggregateUDF, ReturnTypeArgs, ScalarUDF, WindowFrame, WindowFrameBound, WindowFrameUnits,
@@ -1157,7 +1156,7 @@ impl PhysicalPlanner {
 
         // By default, local FS object store registered
         // if `hdfs` feature enabled then HDFS file object store registered
-        register_object_store(Arc::clone(&self.session_ctx))?;
+        let object_store_url = register_object_store(Arc::clone(&self.session_ctx))?;
 
         // Generate file groups
         let mut file_groups: Vec<Vec<PartitionedFile>> =
@@ -1216,8 +1215,6 @@ impl PhysicalPlanner {
 
         // TODO: I think we can remove partition_count in the future, but leave for testing.
         assert_eq!(file_groups.len(), partition_count);
-
-        let object_store_url = ObjectStoreUrl::parse("hdfs://namenode:9000").unwrap();
         let partition_fields: Vec<Field> = partition_schema
             .fields()
             .iter()
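
The planner-side effect of this commit: the scan's `object_store_url` now comes from whatever URL `register_object_store` actually registered, instead of being re-parsed from a string hardcoded to `hdfs://namenode:9000`, which only matched the store registered under the `hdfs` feature. Below is a minimal sketch, not part of the commit, of why the lookup URL must match the registered one. It assumes the `datafusion` and `object_store` crates and DataFusion's default registry behavior (only the local filesystem is pre-registered; looking up an unknown scheme/authority returns an error).

use std::sync::Arc;

use datafusion::execution::object_store::ObjectStoreUrl;
use datafusion::prelude::SessionContext;
use object_store::local::LocalFileSystem;

fn main() {
    let ctx = SessionContext::new();

    // What the non-hdfs register_object_store does, now also handing the
    // URL back to the planner:
    let registered = ObjectStoreUrl::parse("file://").unwrap();
    ctx.runtime_env()
        .register_object_store(registered.as_ref(), Arc::new(LocalFileSystem::new()));

    // A lookup through the returned URL finds the store ...
    assert!(ctx.runtime_env().object_store(&registered).is_ok());

    // ... while the previously hardcoded HDFS URL names a store that was
    // never registered in a non-hdfs build, so a scan could not find it.
    let hardcoded = ObjectStoreUrl::parse("hdfs://namenode:9000").unwrap();
    assert!(ctx.runtime_env().object_store(&hardcoded).is_err());
}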

native/core/src/parquet/parquet_support.rs

Lines changed: 11 additions & 12 deletions
@@ -15,6 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
+use crate::execution::operators::ExecutionError;
 use arrow::{
     array::{
         cast::AsArray,
@@ -35,13 +36,12 @@ use arrow_array::builder::StringBuilder;
 use arrow_array::{DictionaryArray, StringArray, StructArray};
 use arrow_schema::DataType;
 use chrono::{NaiveDate, NaiveDateTime, TimeZone, Timelike};
+use datafusion::execution::object_store::ObjectStoreUrl;
+use datafusion::prelude::SessionContext;
 use datafusion_comet_spark_expr::utils::array_with_timezone;
 use datafusion_comet_spark_expr::{timezone, EvalMode, SparkError, SparkResult};
 use datafusion_common::{cast::as_generic_string_array, Result as DataFusionResult, ScalarValue};
 use datafusion_expr::ColumnarValue;
-// use datafusion_physical_expr::PhysicalExpr;
-use crate::execution::operators::ExecutionError;
-use datafusion::prelude::SessionContext;
 use num::{
     cast::AsPrimitive, integer::div_floor, traits::CheckedNeg, CheckedSub, Integer, Num,
     ToPrimitive,
@@ -50,7 +50,6 @@ use regex::Regex;
 use std::collections::HashMap;
 use std::str::FromStr;
 use std::{fmt::Debug, hash::Hash, num::Wrapping, sync::Arc};
-use url::Url;
 
 static TIMESTAMP_FORMAT: Option<&str> = Some("%Y-%m-%d %H:%M:%S%.f");
 
@@ -1867,29 +1866,29 @@ fn trim_end(s: &str) -> &str {
 #[cfg(not(feature = "hdfs"))]
 pub(crate) fn register_object_store(
     session_context: Arc<SessionContext>,
-) -> Result<(), ExecutionError> {
+) -> Result<ObjectStoreUrl, ExecutionError> {
     let object_store = object_store::local::LocalFileSystem::new();
-    let url = Url::try_from("file://").unwrap();
+    let url = ObjectStoreUrl::parse("file://").unwrap();
     session_context
         .runtime_env()
-        .register_object_store(&url, Arc::new(object_store));
-    Ok(())
+        .register_object_store((&url).as_ref(), Arc::new(object_store));
+    Ok(url)
 }
 
 #[cfg(feature = "hdfs")]
 pub(crate) fn register_object_store(
     session_context: Arc<SessionContext>,
-) -> Result<(), ExecutionError> {
+) -> Result<ObjectStoreUrl, ExecutionError> {
     // TODO: read the namenode configuration from file schema or from spark.defaultFS
-    let url = Url::try_from("hdfs://namenode:9000").unwrap();
+    let url = ObjectStoreUrl::parse("hdfs://namenode:9000")?;
     if let Some(object_store) =
         datafusion_objectstore_hdfs::object_store::hdfs::HadoopFileSystem::new((&url).as_ref())
     {
         session_context
             .runtime_env()
-            .register_object_store(&url, Arc::new(object_store));
+            .register_object_store((&url).as_ref(), Arc::new(object_store));
 
-        return Ok(());
+        return Ok(url);
     }
 
     Err(ExecutionError::GeneralError(format!(
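
Two details worth noting in the rewritten functions. First, `url` is now DataFusion's `ObjectStoreUrl` wrapper rather than a raw `url::Url`, while `RuntimeEnv::register_object_store` still expects `&url::Url`; the `(&url).as_ref()` calls bridge that gap through the wrapper's `AsRef<Url>` impl (in the `HadoopFileSystem::new` call the same spelling resolves through `AsRef<str>` instead, since that constructor appears to take a string slice). Second, the `?` on `ObjectStoreUrl::parse` in the hdfs variant propagates the parse error, which presumably relies on an existing `From<DataFusionError> for ExecutionError` conversion. A tiny sketch of the `AsRef` plumbing, assuming only the `datafusion` and `url` crates:

use datafusion::execution::object_store::ObjectStoreUrl;
use url::Url;

// Stand-ins for the two parameter shapes seen in this diff: `takes_url`
// mirrors RuntimeEnv::register_object_store, `takes_str` mirrors what
// HadoopFileSystem::new appears to accept.
fn takes_url(_u: &Url) {}
fn takes_str(_s: &str) {}

fn main() {
    let url = ObjectStoreUrl::parse("file://").unwrap();
    // ObjectStoreUrl implements both AsRef<Url> and AsRef<str>; the expected
    // parameter type selects the impl, so the same expression works in both
    // positions:
    takes_url((&url).as_ref());
    takes_str((&url).as_ref());
    // takes_url(&url); // would not compile: &ObjectStoreUrl is not &Url
}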
