@@ -379,13 +379,19 @@ class TransportLayerASIO::ASIOSession final : public Session {
 #ifdef MONGO_CONFIG_SSL
         _ranHandshake = true;
         if (_sslSocket) {
+#ifdef __linux__
+            // We do some trickery in asio (see moreToSend), which appears to work well on linux,
+            // but fails on other platforms.
+            return opportunisticWrite(*_sslSocket, buffers);
+#else
             if (_blockingMode == Async) {
                 // Opportunistic writes are broken for async egress SSL (switching between blocking
                 // and non-blocking mode corrupts the TLS exchange).
                 return asio::async_write(*_sslSocket, buffers, UseFuture{});
             } else {
                 return opportunisticWrite(*_sslSocket, buffers);
             }
+#endif
         }
 #endif
         return opportunisticWrite(_socket, buffers);
@@ -414,19 +420,60 @@ class TransportLayerASIO::ASIOSession final : public Session {
         }
     }
 
+    /**
+     * moreToSend checks the ssl socket after an opportunisticWrite. If there are still bytes to
+     * send, we manually send them off the underlying socket. Then we hook that up with a future
+     * that gets us back to sending from the ssl side.
+     *
+     * There are two variants because we call opportunisticWrite on generic sockets and ssl sockets.
+     * The generic socket impl never has more to send (because it doesn't have an inner socket it
+     * needs to keep sending).
+     */
+    template <typename ConstBufferSequence>
+    boost::optional<Future<size_t>> moreToSend(GenericSocket& socket,
+                                               const ConstBufferSequence& buffers,
+                                               size_t size) {
+        return boost::none;
+    }
+
+#ifdef MONGO_CONFIG_SSL
+    template <typename ConstBufferSequence>
+    boost::optional<Future<size_t>> moreToSend(asio::ssl::stream<GenericSocket>& socket,
+                                               const ConstBufferSequence& buffers,
+                                               size_t sizeFromBefore) {
+        if (_sslSocket->getCoreOutputBuffer().size()) {
+            return opportunisticWrite(getSocket(), _sslSocket->getCoreOutputBuffer())
+                .then([this, &socket, buffers, sizeFromBefore](size_t) {
+                    return opportunisticWrite(socket, buffers)
+                        .then([sizeFromBefore](size_t justWritten) {
+                            return justWritten + sizeFromBefore;
+                        });
+                });
+        }
+
+        return boost::none;
+    }
+#endif
+
     template <typename Stream, typename ConstBufferSequence>
     Future<size_t> opportunisticWrite(Stream& stream, const ConstBufferSequence& buffers) {
         std::error_code ec;
         auto size = asio::write(stream, buffers, ec);
         if (((ec == asio::error::would_block) || (ec == asio::error::try_again)) &&
             (_blockingMode == Async)) {
+
             // asio::write is a loop internally, so some of buffers may have been read into already.
             // So we need to adjust the buffers passed into async_write to be offset by size, if
             // size is > 0.
             ConstBufferSequence asyncBuffers(buffers);
             if (size > 0) {
                 asyncBuffers += size;
             }
+
+            if (auto more = moreToSend(stream, asyncBuffers, size)) {
+                return std::move(*more);
+            }
+
             return asio::async_write(stream, asyncBuffers, UseFuture{})
                 .then([size](size_t asyncSize) {
                     // Add back in the size written opportunistically.
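For context, below is a minimal, self-contained sketch of the control flow the patch adds. The types (SimpleSocket, TlsStream) and the synchronous loop/recursion are illustrative assumptions, not the real TransportLayerASIO or asio classes: when a TLS-level write cannot complete, first flush the ciphertext the engine has already produced onto the underlying socket, then resume the TLS-level write and add back the bytes written opportunistically.

// Illustrative sketch only; toy types stand in for the real sockets and futures.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>

// Toy raw socket that accepts at most `capacity` bytes per write before it would block.
struct SimpleSocket {
    std::size_t capacity;
    std::size_t write(const std::string& data) {
        return std::min(capacity, data.size());
    }
};

// Toy TLS wrapper: a write may leave already-encrypted bytes in a core output buffer
// (this stands in for what the patch reads via getCoreOutputBuffer()).
struct TlsStream {
    SimpleSocket& raw;
    std::string coreOutputBuffer;

    std::size_t write(const std::string& plaintext) {
        std::size_t sent = raw.write(plaintext);    // pretend ciphertext == plaintext
        coreOutputBuffer = plaintext.substr(sent);  // leftover ciphertext to flush later
        return sent;
    }
};

// The moreToSend idea: nothing pending means we are done; otherwise flush the pending
// ciphertext on the underlying socket first, then continue from the TLS side and sum
// the byte counts so the caller sees one total.
std::size_t writeWithMoreToSend(TlsStream& tls, const std::string& data) {
    std::size_t sizeFromBefore = tls.write(data);
    if (tls.coreOutputBuffer.empty()) {
        return sizeFromBefore;  // analogous to the generic-socket overload: never more to send
    }
    // Flush what the engine already encrypted straight onto the underlying socket.
    while (!tls.coreOutputBuffer.empty()) {
        std::size_t flushed = tls.raw.write(tls.coreOutputBuffer);
        tls.coreOutputBuffer.erase(0, flushed);
    }
    // Resume from the TLS side for the remaining plaintext, offset by what was already sent.
    std::size_t justWritten = writeWithMoreToSend(tls, data.substr(sizeFromBefore));
    return justWritten + sizeFromBefore;
}

int main() {
    SimpleSocket raw{4};
    TlsStream tls{raw, {}};
    const std::string payload = "pretend this is a wire-protocol message";
    std::cout << writeWithMoreToSend(tls, payload) << " of " << payload.size()
              << " bytes written\n";
}

In the patch itself the flush and the resumed write are chained with Futures rather than a loop and recursion, and the GenericSocket overload of moreToSend simply returns boost::none so the plain (non-SSL) path falls through to asio::async_write unchanged.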