Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
108 changes: 41 additions & 67 deletions lib/read.js
Original file line number Diff line number Diff line change
Expand Up @@ -64,61 +64,68 @@ function read (req, res, next, parse, debug, options) {
return
}

var encoding = null
var charset = null
if (options?.skipCharset !== true) {
encoding = getCharset(req) || options.defaultCharset
charset = getCharset(req) || options.defaultCharset

// validate charset
if (!!options?.isValidCharset && !options.isValidCharset(encoding)) {
if (!!options?.isValidCharset && !options.isValidCharset(charset)) {
debug('invalid charset')
next(createError(415, 'unsupported charset "' + encoding.toUpperCase() + '"', {
charset: encoding,
next(createError(415, 'unsupported charset "' + charset.toUpperCase() + '"', {
charset,
type: 'charset.unsupported'
}))
return
}
}

var length
var opts = options
var stream
// get the content stream
const contentEncoding = (req.headers['content-encoding'] || 'identity').toLowerCase()
debug('content-encoding "%s"', contentEncoding)

// read options
var verify = opts.verify

try {
// get the content stream
stream = contentstream(req, debug, opts.inflate)
length = stream.length
stream.length = undefined
} catch (err) {
return next(err)
if (options.inflate === false && contentEncoding !== 'identity') {
return next(createError(415, 'content encoding unsupported', {
encoding: contentEncoding,
type: 'encoding.unsupported'
}))
}

// set raw-body options
opts.length = length
opts.encoding = verify
? null
: encoding
let stream
if (contentEncoding === 'identity') {
// set raw-body expected length
stream = req
options.length = req.headers['content-length']
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Improves the code by only accessing req.headers['content-length'] and setting the length option for raw-body when the stream needs no decompression

} else {
try {
stream = createDecompressionStream(contentEncoding, debug)
req.pipe(stream)
options.length = undefined
} catch (err) {
return next(err)
}
}

// assert charset is supported
if (opts.encoding === null && encoding !== null && !iconv.encodingExists(encoding)) {
return next(createError(415, 'unsupported charset "' + encoding.toUpperCase() + '"', {
charset: encoding.toLowerCase(),
if (options.verify && charset !== null && !iconv.encodingExists(charset)) {
Copy link

Copilot AI Sep 8, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The charset validation logic has changed incorrectly. Previously, this check was performed when opts.encoding === null (which occurred when verify was truthy), but now it's only performed when options.verify is truthy AND charset is not null. This could skip charset validation when verify is falsy but charset needs to be validated for decoding.

Suggested change
if (options.verify && charset !== null && !iconv.encodingExists(charset)) {
if (charset !== null && !iconv.encodingExists(charset)) {

Copilot uses AI. Check for mistakes.

return next(createError(415, 'unsupported charset "' + charset.toUpperCase() + '"', {
charset: charset.toLowerCase(),
type: 'charset.unsupported'
}))
}

// set raw-body encoding
options.encoding = options.verify ? null : charset

// read body
debug('read body')
getBody(stream, opts, function (error, body) {
getBody(stream, options, function (error, body) {
if (error) {
var _error

if (error.type === 'encoding.unsupported') {
// echo back charset
_error = createError(415, 'unsupported charset "' + encoding.toUpperCase() + '"', {
charset: encoding.toLowerCase(),
_error = createError(415, 'unsupported charset "' + charset.toUpperCase() + '"', {
charset: charset.toLowerCase(),
type: 'charset.unsupported'
})
} else {
Expand All @@ -140,10 +147,10 @@ function read (req, res, next, parse, debug, options) {
}

// verify
if (verify) {
if (options.verify) {
try {
debug('verify body')
verify(req, res, body, encoding)
options.verify(req, res, body, charset)
} catch (err) {
next(createError(403, err, {
body: body,
Expand All @@ -157,10 +164,10 @@ function read (req, res, next, parse, debug, options) {
var str = body
try {
debug('parse body')
str = typeof body !== 'string' && encoding !== null
? iconv.decode(body, encoding)
str = typeof body !== 'string' && charset !== null
? iconv.decode(body, charset)
: body
req.body = parse(str, encoding)
req.body = parse(str, charset)
} catch (err) {
next(createError(400, err, {
body: str,
Expand All @@ -173,39 +180,6 @@ function read (req, res, next, parse, debug, options) {
})
}

/**
 * Resolve the stream the request body should be read from.
 *
 * When the body is not compressed the request object itself is returned
 * (with its declared content length exposed as `req.length` for raw-body);
 * otherwise the request is piped through a matching decompression stream.
 *
 * @param {object} req
 * @param {function} debug
 * @param {boolean} [inflate=true]
 * @return {object} the stream to read the body from
 * @throws 415 error when inflate is disabled but the body is encoded
 * @api private
 */

function contentstream (req, debug, inflate) {
  const contentEncoding = (req.headers['content-encoding'] || 'identity').toLowerCase()

  debug('content-encoding "%s"', contentEncoding)

  // encoded body but inflation disabled: refuse up front
  if (contentEncoding !== 'identity' && inflate === false) {
    throw createError(415, 'content encoding unsupported', {
      encoding: contentEncoding,
      type: 'encoding.unsupported'
    })
  }

  // plain body: hand back the request itself, annotated with its length
  if (contentEncoding === 'identity') {
    req.length = req.headers['content-length']
    return req
  }

  // compressed body: pipe through the appropriate decompressor
  const decompressor = createDecompressionStream(contentEncoding, debug)
  req.pipe(decompressor)
  return decompressor
}
Comment on lines -176 to -207
Copy link
Member Author

@Phillip9587 Phillip9587 Feb 15, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This can be seen as follow-up to #564. I included the contentstream function in the main read function for multiple reasons:

  • to prevent the req object from being altered by setting length and resetting it in read.
  • This also improves the code by only setting the raw-body length option — and thus only reading req.headers['content-length'] — when the request needs no inflation.


/**
* Create a decompression stream for the given encoding.
* @param {string} encoding
Expand Down
Loading