@@ -19,6 +19,7 @@ import { ChildProcessMonitor } from 'vs/platform/terminal/node/childProcessMonit
19
19
import { findExecutable , getShellIntegrationInjection , getWindowsBuildNumber , IShellIntegrationConfigInjection } from 'vs/platform/terminal/node/terminalEnvironment' ;
20
20
import { WindowsShellHelper } from 'vs/platform/terminal/node/windowsShellHelper' ;
21
21
import { IPty , IPtyForkOptions , IWindowsPtyForkOptions , spawn } from 'node-pty' ;
22
+ import { chunkInput } from 'vs/platform/terminal/common/terminalProcess' ;
22
23
23
24
const enum ShutdownConstants {
24
25
/**
@@ -54,14 +55,6 @@ const enum Constants {
54
55
* interval.
55
56
*/
56
57
KillSpawnSpacingDuration = 50 ,
57
-
58
- /**
59
- * Writing large amounts of data can be corrupted for some reason, after looking into this it
60
- * appears to be a race condition around writing to the FD which may be based on how powerful
61
- * the hardware is. The workaround for this is to space out when large amounts of data are being
62
- * written to the terminal. See https://github.com/microsoft/vscode/issues/38137
63
- */
64
- WriteMaxChunkSize = 50 ,
65
58
/**
66
59
* How long to wait between chunk writes.
67
60
*/
@@ -645,31 +638,3 @@ class DelayedResizer extends Disposable {
645
638
this . _register ( toDisposable ( ( ) => clearTimeout ( this . _timeout ) ) ) ;
646
639
}
647
640
}
648
-
649
- /**
650
- * Splits incoming pty data into chunks to try prevent data corruption that could occur when pasting
651
- * large amounts of data.
652
- */
653
- export function chunkInput ( data : string ) : string [ ] {
654
- const chunks : string [ ] = [ ] ;
655
- let nextChunkStartIndex = 0 ;
656
- for ( let i = 0 ; i < data . length - 1 ; i ++ ) {
657
- if (
658
- // If the max chunk size is reached
659
- i - nextChunkStartIndex + 1 >= Constants . WriteMaxChunkSize ||
660
- // If the next character is ESC, send the pending data to avoid splitting the escape
661
- // sequence.
662
- data [ i + 1 ] === '\x1b'
663
- ) {
664
- chunks . push ( data . substring ( nextChunkStartIndex , i + 1 ) ) ;
665
- nextChunkStartIndex = i + 1 ;
666
- // Skip the next character as the chunk would be a single character
667
- i ++ ;
668
- }
669
- }
670
- // Push final chunk
671
- if ( nextChunkStartIndex !== data . length ) {
672
- chunks . push ( data . substring ( nextChunkStartIndex ) ) ;
673
- }
674
- return chunks ;
675
- }
0 commit comments