Skip to content

Commit f32cb53

Browse files
committed
Addressed a performance issue in the Bulk Copy API when used with batch insert
1 parent 1ed161f commit f32cb53

File tree

2 files changed

+3
-13
lines changed

2 files changed

+3
-13
lines changed

src/main/java/com/microsoft/sqlserver/jdbc/SQLServerBulkBatchInsertRecord.java

Lines changed: 1 addition & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -189,18 +189,7 @@ else if (dateTimeFormatter != null)
189189
case Types.LONGVARCHAR:
190190
case Types.NCHAR:
191191
case Types.NVARCHAR:
192-
case Types.LONGNVARCHAR: {
193-
/*
194-
* If string data comes in as a byte array through setString (and sendStringParametersAsUnicode = false)
195-
* through Bulk Copy for Batch Insert API, convert the byte array to a string.
196-
* If the data is already a string, return it as is.
197-
*/
198-
if (data instanceof byte[]) {
199-
return new String((byte[]) data, charset);
200-
}
201-
return data;
202-
}
203-
192+
case Types.LONGNVARCHAR:
204193
case Types.DATE:
205194
case Types.CLOB:
206195
default: {

src/main/java/com/microsoft/sqlserver/jdbc/dtv.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2063,7 +2063,8 @@ else if (type.isBinary()) {
20632063
// then do the conversion now so that the decision to use a "short" or "long"
20642064
// SSType (i.e. VARCHAR vs. TEXT/VARCHAR(max)) is based on the exact length of
20652065
// the MBCS value (in bytes).
2066-
else if (null != collation && (JDBCType.CHAR == type || JDBCType.VARCHAR == type
2066+
// If useBulkCopyForBatchInsert is true, the conversion to a byte array is skipped for performance reasons
2067+
else if ((con.getUseBulkCopyForBatchInsert() == false) && null != collation && (JDBCType.CHAR == type || JDBCType.VARCHAR == type
20672068
|| JDBCType.LONGVARCHAR == type || JDBCType.CLOB == type)) {
20682069
byte[] nativeEncoding = null;
20692070

0 commit comments

Comments
 (0)