@@ -916,50 +916,49 @@ def kill_cursors(cursor_ids):
 class _BulkWriteContext(object):
     """A wrapper around SocketInfo for use with write splitting functions."""

-    __slots__ = ('db_name', 'command', 'sock_info', 'op_id',
+    __slots__ = ('db_name', 'sock_info', 'op_id',
                  'name', 'field', 'publish', 'start_time', 'listeners',
-                 'session', 'compress', 'op_type', 'codec')
+                 'session', 'compress', 'op_type', 'codec', 'cmd_legacy')

-    def __init__(self, database_name, command, sock_info, operation_id,
-                 listeners, session, op_type, codec):
+    def __init__(self, database_name, cmd_name, sock_info, operation_id,
+                 listeners, session, op_type, codec, cmd_legacy=None):
         self.db_name = database_name
-        self.command = command
         self.sock_info = sock_info
         self.op_id = operation_id
         self.listeners = listeners
         self.publish = listeners.enabled_for_commands
-        self.name = next(iter(command))
+        self.name = cmd_name
         self.field = _FIELD_MAP[self.name]
         self.start_time = datetime.datetime.now() if self.publish else None
         self.session = session
         self.compress = True if sock_info.compression_context else False
         self.op_type = op_type
         self.codec = codec
-        sock_info.add_server_api(command)
+        self.cmd_legacy = cmd_legacy

-    def _batch_command(self, docs):
+    def _batch_command(self, cmd, docs):
         namespace = self.db_name + '.$cmd'
         request_id, msg, to_send = _do_bulk_write_command(
-            namespace, self.op_type, self.command, docs, self.check_keys,
+            namespace, self.op_type, cmd, docs, self.check_keys,
             self.codec, self)
         if not to_send:
             raise InvalidOperation("cannot do an empty bulk write")
         return request_id, msg, to_send

-    def execute(self, docs, client):
-        request_id, msg, to_send = self._batch_command(docs)
-        result = self.write_command(request_id, msg, to_send)
+    def execute(self, cmd, docs, client):
+        request_id, msg, to_send = self._batch_command(cmd, docs)
+        result = self.write_command(cmd, request_id, msg, to_send)
         client._process_response(result, self.session)
         return result, to_send

-    def execute_unack(self, docs, client):
-        request_id, msg, to_send = self._batch_command(docs)
+    def execute_unack(self, cmd, docs, client):
+        request_id, msg, to_send = self._batch_command(cmd, docs)
         # Though this isn't strictly a "legacy" write, the helper
         # handles publishing commands and sending our message
         # without receiving a result. Send 0 for max_doc_size
         # to disable size checking. Size checking is handled while
         # the documents are encoded to BSON.
-        self.legacy_write(request_id, msg, 0, False, to_send)
+        self.legacy_write(cmd, request_id, msg, 0, False, to_send)
         return to_send

     @property
@@ -996,14 +995,16 @@ def legacy_bulk_insert(
             request_id, msg = _compress(
                 2002, msg, self.sock_info.compression_context)
         return self.legacy_write(
-            request_id, msg, max_doc_size, acknowledged, docs)
+            self.cmd_legacy.copy(), request_id, msg, max_doc_size,
+            acknowledged, docs)

-    def legacy_write(self, request_id, msg, max_doc_size, acknowledged, docs):
+    def legacy_write(self, cmd, request_id, msg, max_doc_size, acknowledged,
+                     docs):
         """A proxy for SocketInfo.legacy_write that handles event publishing.
         """
         if self.publish:
             duration = datetime.datetime.now() - self.start_time
-            cmd = self._start(request_id, docs)
+            cmd = self._start(cmd, request_id, docs)
             start = datetime.datetime.now()
         try:
             result = self.sock_info.legacy_write(
@@ -1032,12 +1033,12 @@ def legacy_write(self, request_id, msg, max_doc_size, acknowledged, docs):
             self.start_time = datetime.datetime.now()
         return result

-    def write_command(self, request_id, msg, docs):
+    def write_command(self, cmd, request_id, msg, docs):
         """A proxy for SocketInfo.write_command that handles event publishing.
         """
         if self.publish:
             duration = datetime.datetime.now() - self.start_time
-            self._start(request_id, docs)
+            self._start(cmd, request_id, docs)
             start = datetime.datetime.now()
         try:
             reply = self.sock_info.write_command(request_id, msg)
@@ -1057,9 +1058,8 @@ def write_command(self, request_id, msg, docs):
             self.start_time = datetime.datetime.now()
         return reply

-    def _start(self, request_id, docs):
+    def _start(self, cmd, request_id, docs):
         """Publish a CommandStartedEvent."""
-        cmd = self.command.copy()
         cmd[self.field] = docs
         self.listeners.publish_command_start(
             cmd, self.db_name,
@@ -1092,10 +1092,10 @@ def _fail(self, request_id, failure, duration):
 class _EncryptedBulkWriteContext(_BulkWriteContext):
     __slots__ = ()

-    def _batch_command(self, docs):
+    def _batch_command(self, cmd, docs):
         namespace = self.db_name + '.$cmd'
         msg, to_send = _encode_batched_write_command(
-            namespace, self.op_type, self.command, docs, self.check_keys,
+            namespace, self.op_type, cmd, docs, self.check_keys,
             self.codec, self)
         if not to_send:
             raise InvalidOperation("cannot do an empty bulk write")
@@ -1106,17 +1106,18 @@ def _batch_command(self, docs):
                             DEFAULT_RAW_BSON_OPTIONS)
         return cmd, to_send

-    def execute(self, docs, client):
-        cmd, to_send = self._batch_command(docs)
+    def execute(self, cmd, docs, client):
+        batched_cmd, to_send = self._batch_command(cmd, docs)
         result = self.sock_info.command(
-            self.db_name, cmd, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
+            self.db_name, batched_cmd,
+            codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
             session=self.session, client=client)
         return result, to_send

-    def execute_unack(self, docs, client):
-        cmd, to_send = self._batch_command(docs)
+    def execute_unack(self, cmd, docs, client):
+        batched_cmd, to_send = self._batch_command(cmd, docs)
         self.sock_info.command(
-            self.db_name, cmd, write_concern=WriteConcern(w=0),
+            self.db_name, batched_cmd, write_concern=WriteConcern(w=0),
             session=self.session, client=client)
         return to_send

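For reviewers, a minimal usage sketch of the new call shape: the context is now built from the command *name* only, and each batch's command document is passed explicitly to `execute`. The wiring below (`bwc`, the collection name, and the `ordered` flag) is illustrative and not taken from this patch; only the `_BulkWriteContext.execute(cmd, docs, client)` signature comes from the diff above.

```python
from bson.son import SON

def run_insert_batch(bwc, client, docs):
    # Hypothetical caller. `bwc` is assumed to be a _BulkWriteContext created
    # with the command name ('insert') rather than the full command document,
    # matching the new __init__(database_name, cmd_name, ...) signature.
    cmd = SON([('insert', 'my_collection'),  # illustrative collection name
               ('ordered', True)])
    # execute() now takes the command document first; _batch_command splits
    # `docs` into batches and attaches them under the field for this op type.
    result, sent = bwc.execute(cmd, docs, client)
    return result, sent
```

The design change this enables: because the command document is no longer cached on the context, per-batch state (retryable-write transaction numbers, session fields, and the like) can be written into a fresh `cmd` for each call instead of mutating a shared dict, with `cmd_legacy` kept only for the old opcode-based insert path.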