-
Notifications
You must be signed in to change notification settings - Fork 28
Remove the default collected output buffer limit and throw an error when the limit is reached #130
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -158,13 +158,25 @@ public struct BytesOutput: OutputProtocol { | |
var result: [UInt8]? = nil | ||
#endif | ||
do { | ||
result = try await AsyncIO.shared.read(from: diskIO, upTo: self.maxSize) | ||
var maxLength = self.maxSize | ||
if maxLength != .max { | ||
// If we actually have a max length, attempt to read one | ||
// more byte to determine whether output exceeds the limit | ||
maxLength += 1 | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Does this mean that we've drained 1 more byte from the I/O than the caller expected? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes. Unfortunately this is the only way to detect whether there's more data coming. If the caller only expected N bytes, then the child process should only produce N bytes with EOF at the end. If not, then we consider this an error. |
||
} | ||
result = try await AsyncIO.shared.read(from: diskIO, upTo: maxLength) | ||
} catch { | ||
try diskIO.safelyClose() | ||
throw error | ||
} | ||
|
||
try diskIO.safelyClose() | ||
|
||
if let result, result.count > self.maxSize { | ||
throw SubprocessError( | ||
code: .init(.outputBufferLimitExceeded(self.maxSize)), | ||
underlyingError: nil | ||
) | ||
} | ||
#if canImport(Darwin) | ||
return result?.array() ?? [] | ||
#else | ||
|
@@ -213,16 +225,19 @@ extension OutputProtocol where Self == FileDescriptorOutput { | |
} | ||
|
||
extension OutputProtocol where Self == StringOutput<UTF8> { | ||
/// Create a `Subprocess` output that collects output as | ||
/// UTF8 String with 128kb limit. | ||
public static var string: Self { | ||
.init(limit: 128 * 1024, encoding: UTF8.self) | ||
/// Create a `Subprocess` output that collects output as UTF8 String | ||
iCharlesHu marked this conversation as resolved.
Show resolved
Hide resolved
|
||
/// with a buffer limit in bytes. Subprocess throws an error if the | ||
/// child process emits more bytes than the limit. | ||
public static func string(limit: Int) -> Self { | ||
return .init(limit: limit, encoding: UTF8.self) | ||
} | ||
} | ||
|
||
extension OutputProtocol { | ||
/// Create a `Subprocess` output that collects output as | ||
/// `String` using the given encoding up to limit it bytes. | ||
/// `String` using the given encoding up to limit in bytes. | ||
/// Subprocess throws an error if the child process emits | ||
/// more bytes than the limit. | ||
public static func string<Encoding: Unicode.Encoding>( | ||
limit: Int, | ||
encoding: Encoding.Type | ||
|
@@ -234,11 +249,8 @@ extension OutputProtocol { | |
|
||
extension OutputProtocol where Self == BytesOutput { | ||
/// Create a `Subprocess` output that collects output as | ||
/// `Buffer` with 128kb limit. | ||
public static var bytes: Self { .init(limit: 128 * 1024) } | ||
|
||
/// Create a `Subprocess` output that collects output as | ||
/// `Buffer` up to limit it bytes. | ||
/// `Buffer` with a buffer limit in bytes. Subprocess throws | ||
/// an error if the child process emits more bytes than the limit. | ||
public static func bytes(limit: Int) -> Self { | ||
return .init(limit: limit) | ||
} | ||
|
@@ -299,13 +311,25 @@ extension OutputProtocol { | |
var result: [UInt8]? = nil | ||
#endif | ||
do { | ||
result = try await AsyncIO.shared.read(from: diskIO, upTo: self.maxSize) | ||
var maxLength = self.maxSize | ||
if maxLength != .max { | ||
// If we actually have a max length, attempt to read one | ||
// more byte to determine whether output exceeds the limit | ||
maxLength += 1 | ||
} | ||
result = try await AsyncIO.shared.read(from: diskIO, upTo: maxLength) | ||
} catch { | ||
try diskIO.safelyClose() | ||
throw error | ||
} | ||
|
||
try diskIO.safelyClose() | ||
if let result, result.count > self.maxSize { | ||
throw SubprocessError( | ||
code: .init(.outputBufferLimitExceeded(self.maxSize)), | ||
underlyingError: nil | ||
) | ||
} | ||
#if canImport(Darwin) | ||
return try self.output(from: result ?? .empty) | ||
#else | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
After we set the API we should probably consider the error numbers to be frozen, in case people have written documentation about them or compare them to raw values.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I agree. I'm only doing this because we haven't tagged 0.0.1 yet.