Skip to content

Commit d04a737

Browse files
viirya authored and dongjoon-hyun committed
[MINOR][DOC][SQL] Remove out-of-date doc about ORC in DataFrameReader and Writer
## What changes were proposed in this pull request?

According to the current status, `orc` is available even if Hive support isn't enabled. This is a minor doc change to reflect it.

## How was this patch tested?

Doc-only change.

Closes apache#24280 from viirya/fix-orc-doc.

Authored-by: Liang-Chi Hsieh <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
1 parent 1bc6723 commit d04a737

File tree

3 files changed

+0
-7
lines changed

3 files changed

+0
-7
lines changed

python/pyspark/sql/readwriter.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -509,8 +509,6 @@ def func(iterator):
509509
def orc(self, path):
510510
"""Loads ORC files, returning the result as a :class:`DataFrame`.
511511
512-
.. note:: Currently ORC support is only available together with Hive support.
513-
514512
>>> df = spark.read.orc('python/test_support/sql/orc_partitioned')
515513
>>> df.dtypes
516514
[('a', 'bigint'), ('b', 'int'), ('c', 'int')]
@@ -950,8 +948,6 @@ def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=No
950948
def orc(self, path, mode=None, partitionBy=None, compression=None):
951949
"""Saves the content of the :class:`DataFrame` in ORC format at the specified path.
952950
953-
.. note:: Currently ORC support is only available together with Hive support.
954-
955951
:param path: the path in any Hadoop supported file system
956952
:param mode: specifies the behavior of the save operation when data already exists.
957953

sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -679,7 +679,6 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
679679
*
680680
* @param path input path
681681
* @since 1.5.0
682-
* @note Currently, this method can only be used after enabling Hive support.
683682
*/
684683
def orc(path: String): DataFrame = {
685684
// This method ensures that calls that explicit need single argument works, see SPARK-16009
@@ -691,7 +690,6 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
691690
*
692691
* @param paths input paths
693692
* @since 2.0.0
694-
* @note Currently, this method can only be used after enabling Hive support.
695693
*/
696694
@scala.annotation.varargs
697695
def orc(paths: String*): DataFrame = format("orc").load(paths: _*)

sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -615,7 +615,6 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
615615
* </ul>
616616
*
617617
* @since 1.5.0
618-
* @note Currently, this method can only be used after enabling Hive support
619618
*/
620619
def orc(path: String): Unit = {
621620
format("orc").save(path)

0 commit comments

Comments (0)