@@ -644,6 +644,7 @@ def to_sql(
644644 primary_keys : Optional [List [str ]] = None ,
645645 varchar_lengths_default : int = 256 ,
646646 varchar_lengths : Optional [Dict [str , int ]] = None ,
647+ use_column_names : bool = False ,
647648) -> None :
648649 """Write records stored in a DataFrame into Redshift.
649650
@@ -688,6 +689,10 @@ def to_sql(
688689 The size that will be set for all VARCHAR columns not specified with varchar_lengths.
689690 varchar_lengths : Dict[str, int], optional
690691 Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}).
692+ use_column_names : bool
693+ If set to True, will use the column names of the DataFrame for generating the INSERT SQL Query.
694+ E.g. If the DataFrame has two columns `col1` and `col3` and `use_column_names` is True, data will only be
695+ inserted into the database columns `col1` and `col3`.
691696
692697 Returns
693698 -------
@@ -737,7 +742,10 @@ def to_sql(
737742 df .reset_index (level = df .index .names , inplace = True )
738743 placeholders : str = ", " .join (["%s" ] * len (df .columns ))
739744 schema_str = f'"{ created_schema } ".' if created_schema else ""
740- sql : str = f'INSERT INTO { schema_str } "{ created_table } " VALUES ({ placeholders } )'
745+ insertion_columns = ""
746+ if use_column_names :
747+ insertion_columns = f"({ ', ' .join (df .columns )} )"
748+ sql : str = f'INSERT INTO { schema_str } "{ created_table } " { insertion_columns } VALUES ({ placeholders } )'
741749 _logger .debug ("sql: %s" , sql )
742750 parameters : List [List [Any ]] = _db_utils .extract_parameters (df = df )
743751 cursor .executemany (sql , parameters )
0 commit comments