Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions esp/services/ws_dfu/ws_dfuService.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6520,6 +6520,7 @@ bool CWsDfuEx::onDFUFileCreateV2(IEspContext &context, IEspDFUFileCreateV2Reques
break;
case CDFUFileType_Index:
fileType = "key";
break;
default:
throw makeStringExceptionV(ECLWATCH_MISSING_FILETYPE, "DFUFileCreateV2: File type not provided");
}
Expand Down Expand Up @@ -6568,7 +6569,15 @@ bool CWsDfuEx::onDFUFileCreateV2(IEspContext &context, IEspDFUFileCreateV2Reques
Owned<IGroup> group = queryNamedGroupStore().lookup(groupName.str());
if (!group)
throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreateV2: Failed to get Group %s.", groupName.str());

fileDesc.setown(createFileDescriptor(tempFileName, clusterTypeString(clusterType, false), groupName, group));
if (kind == CDFUFileType_Index)
{
uint tlkPartIndex = fileDesc->numParts();
fileDesc->setNumParts(fileDesc->numParts() + 1);
fileDesc->queryPart(tlkPartIndex)->queryProperties().setProp("@kind", "topLevelKey");
}

// NB: if file has copies on >1 cluster, they must share the same key
std::vector<std::string> groups;
groups.push_back(groupName.str());
Expand Down
272 changes: 267 additions & 5 deletions fs/dafsserver/dafsserver.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,9 @@
#include "rmtfile.hpp"
#include "rmtclient_impl.hpp"
#include "dafsserver.hpp"
#include "keybuild.hpp"
#include "eclhelper_base.hpp"
#include "thorfile.hpp"

#include "ftslavelib.hpp"
#include "filecopy.hpp"
Expand Down Expand Up @@ -2575,8 +2578,6 @@ class CRemoteWriteBaseActivity : public CSimpleInterfaceOf<IRemoteWriteActivity>
return this;
}
};


Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

trivial: leave one line, consistent with other spacing between classes.

class CRemoteDiskWriteActivity : public CRemoteWriteBaseActivity
{
typedef CRemoteWriteBaseActivity PARENT;
Expand Down Expand Up @@ -2655,6 +2656,255 @@ class CRemoteDiskWriteActivity : public CRemoteWriteBaseActivity
}
};

class CRemoteIndexWriteHelper : public CThorIndexWriteArg
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does this class actually provided any benefit? It is legal to call createKeyBuilder() with null for the helper.
I suspect it adds complication with no benefit.
I don't think there is currently a way of adding bloom filters without a helper, but that it would be better to add virtuals to allow that, and apply the values directly from a property tree.

Long term that is the direction that disk write is going for many options.

{
UnexpectedVirtualFieldCallback fieldCallback;
Owned<const IDynamicTransform> translator;
std::map<std::string, std::string> indexMetaData;
public:
CRemoteIndexWriteHelper(const char * _filename, const char* _compression, IOutputMetaData * _inMeta, IOutputMetaData * _outMeta, unsigned _flags)
: filename(_filename), compression(_compression), inMeta(_inMeta), outMeta(_outMeta), flags(_flags)
{
const RtlRecord &inRecord = inMeta->queryRecordAccessor(true);
const RtlRecord &outRecord = outMeta->queryRecordAccessor(true);
translator.setown(createRecordTranslator(outRecord, inRecord));
}

virtual bool getIndexMeta(size32_t & lenName, char * & name, size32_t & lenValue, char * & value, unsigned idx)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

trivial: add 'override'

{
if (idx >= indexMetaData.size())
return false;

auto it = indexMetaData.begin();
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

would a std::vector of a pair of std::string's be more suitable?

std::advance(it, idx);

lenName = it->first.length();
name = (char*) rtlMalloc(lenName);
memcpy(name, it->first.c_str(), lenName);

lenValue = it->second.length();
value = (char*) rtlMalloc(lenValue);
memcpy(value, it->second.c_str(), lenValue);

return true;
}

void setIndexMeta(const std::string& name, const std::string& value)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

picky: nicer if virtuals of IHThorIndexWriteArg kept together.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

doesn't getWidth() need to be implemented with count of meta fields for getIndexMeta to be callable ?

{
indexMetaData[name] = value;
}

virtual const char * getFileName() { return filename.c_str(); }
virtual int getSequence() { return 0; }
virtual IOutputMetaData * queryDiskRecordSize() { return outMeta; }
virtual const char * queryRecordECL() { return nullptr; }
virtual unsigned getFlags() { return flags; }
virtual size32_t transform(ARowBuilder & rowBuilder, const void * row, IBlobCreator * blobs, unsigned __int64 & filepos)
{
// Seems like an UnexpectedVirtualFieldCallback could be used but what about blobs?
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please create a separate jira for supporting blobs. It will need changes to the translator, including a new virtual in the callback interface.

return translator->translate(rowBuilder, fieldCallback, (const byte *)row);
}
virtual unsigned getKeyedSize()
{
if (outMeta == nullptr)
return 0;

const RtlRecord& recAccessor = outMeta->queryRecordAccessor(true);
return recAccessor.getFixedOffset(recAccessor.getNumKeyedFields());
}
virtual unsigned getMaxKeySize() { return 0; }
virtual unsigned getFormatCrc() { return 0; }
virtual const char * queryCompression() { return compression.c_str(); }
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

trivial: would be good to add "override" on all these.


public:
std::string filename;
std::string compression;
IOutputMetaData * inMeta = nullptr;
IOutputMetaData * outMeta = nullptr;
unsigned flags = 0;
};

class CRemoteIndexWriteActivity : public CRemoteWriteBaseActivity, implements IBlobCreator
{
Owned<IFileIOStream> iFileIOStream;
Owned<IKeyBuilder> builder;
Owned<CRemoteIndexWriteHelper> helper;
Linked<IOutputMetaData> inMeta, outMeta;
UnexpectedVirtualFieldCallback fieldCallback;
OwnedMalloc<char> prevRowBuffer;
OwnedMalloc<char> rowBuffer;

uint64_t uncompressedSize = 0;
uint64_t processed = 0;
size32_t maxDiskRecordSize = 0;
size32_t maxRecordSizeSeen = 0; // used to store the maximum record size seen, for metadata
bool isTlk = false;
bool opened = false;

inline void processRow(const void *row, uint64_t rowSize)
{
unsigned __int64 fpos = 0;
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Setting the fpos correctly here is a bit odd, this would definitely need to come from the incoming record, but an fpos may not always make sense, and because the datasets are often projected it isn't easy to reliable get the fpos of a read dataset.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fpos only applicable if building an index of a base dataset (where fpos' refer to offset in flat file).
Not sure there's any need to support it.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Unfortunately ECL has weird semantics to reuse the fileposition field if the last field in the payload is numeric.

I think this is ok as it is, but the index will only be generally readable if it is defined with the FILEPOSITION(FALSE) attribute (if the last field is a numeric value).

If you want to be able to create all keys then you will need to do some horrible transformations to read the integer value of the last field, put it into the fileposition field.
Again create a separate jira to revisit.

RtlStaticRowBuilder rowBuilder(rowBuffer, maxDiskRecordSize);
size32_t indexRowSize = helper->transform(rowBuilder, row, this, fpos);

// Key builder checks for duplicate records so we can just check for sortedness
if (memcmp(prevRowBuffer.get(), rowBuffer.get(), helper->getKeyedSize()) > 0)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

but shouldn't be necessary and an extra "expense". Perhaps only in debug builds, or via a configurable option?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

cache helper->getKeyedSize() in a member variable.

Copy link

Copilot AI Jul 29, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The comparison uses memcmp which performs byte-wise comparison, but this may not be correct for all data types. For complex types like strings with different encodings or numeric types with different byte representations, this could give incorrect sort order results. Consider using a proper record comparison function that understands the data types.

Copilot uses AI. Check for mistakes.
{
throw createDafsExceptionV(DAFSERR_cmdstream_generalwritefailure, "CRemoteIndexWriteActivity: Incoming rows are not sorted.");
}

builder->processKeyData(rowBuffer, fpos, indexRowSize);
uncompressedSize += (indexRowSize + sizeof(offset_t)); // Include FPOS in the uncompressed size
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

unused at moment, is the idea to expose this as a stat?
I'm not sure it should include fpos size..


if (indexRowSize > maxRecordSizeSeen)
maxRecordSizeSeen = indexRowSize;

processed++;
memcpy(prevRowBuffer.get(), rowBuffer.get(), maxDiskRecordSize);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

related to earlier comment - this is only validation, should it be in a debug build only, or via a configurable option?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Only need to save keyedSize. I suspect benefit of catching invalid input data outweighs the cost.

}

void openFileStream()
{
if (!recursiveCreateDirectoryForFile(fileName))
throw createDafsExceptionV(DAFSERR_cmdstream_openfailure, "Failed to create dirtory for file: '%s'", fileName.get());
Copy link

Copilot AI Jul 29, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Typo in error message: 'dirtory' should be 'directory'.

Suggested change
throw createDafsExceptionV(DAFSERR_cmdstream_openfailure, "Failed to create dirtory for file: '%s'", fileName.get());
throw createDafsExceptionV(DAFSERR_cmdstream_openfailure, "Failed to create directory for file: '%s'", fileName.get());

Copilot uses AI. Check for mistakes.
OwnedIFile iFile = createIFile(fileName);
assertex(iFile);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No need to check - never fails.


iFileIO.setown(iFile->open(IFOcreate));
if (!iFileIO)
throw createDafsExceptionV(DAFSERR_cmdstream_openfailure, "Failed to open: '%s' for write", fileName.get());

iFileIOStream.setown(createIOStream(iFileIO));
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should probably use a buffered io (e.g. createBufferedIOStream)

opened = true;
}

virtual unsigned __int64 createBlob(size32_t size, const void * ptr)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

trivial: missing "override"

{
return builder->createBlob(size, (const char *) ptr);
}
public:

CRemoteIndexWriteActivity(IPropertyTree &config, IFileDescriptor *fileDesc) : CRemoteWriteBaseActivity(config, fileDesc)
{
inMeta.setown(getTypeInfoOutputMetaData(config, "input", false));
if (!inMeta)
throw createDafsException(DAFSERR_cmdstream_protocol_failure, "CRemoteIndexWriteActivity: input metadata missing");

outMeta.setown(getTypeInfoOutputMetaData(config, "output", false));
if (!outMeta)
throw createDafsException(DAFSERR_cmdstream_protocol_failure, "CRemoteIndexWriteActivity: output metadata missing");

std::string compression = config.queryProp("compressed", "default");
toLower(compression);
trim(compression);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

curious why this string might need leading spaces trimmed ? (vs any other string)

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree, I'm not sure why you would trim this field.


unsigned flags = COL_PREFIX | HTREE_FULLSORT_KEY | USE_TRAILING_HEADER;

if (compression == "default")
{
flags |= HTREE_COMPRESSED_KEY;
compression = "";
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a better way to determine the "default" compression format?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this code should probably use translateToCompMethod(compression)

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not really - it is only a very small subset of compression types.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Realistically I think you should always set htree_compressed_key and then pass through the compression as is. Row compression is not used outside the regression suite.

I would change the check in keybuild.cpp:

if (!isEmptyString(compression))

to

if (!isEmptyString(compression) && !strsame(compression, "lzw") && && !strsame(compression, "default"))

Which will allow lzw to be explicitly defined if we ever change the default.

}
else if (compression == "lzw")
{
flags |= HTREE_COMPRESSED_KEY;
compression = "";
}
else if (compression == "row")
{
compression = "";
flags |= HTREE_COMPRESSED_KEY | HTREE_QUICK_COMPRESSED_KEY;
}
else if (compression.substr(0,7) == "inplace")
{
flags |= HTREE_COMPRESSED_KEY;
}

bool isVariable = outMeta->isVariableSize();
if (isVariable)
flags |= HTREE_VARSIZE;

helper.setown(new CRemoteIndexWriteHelper(fileName.get(), compression.c_str(), inMeta, outMeta, flags));

unsigned nodeSize = NODESIZE;
if (config.hasProp("nodeSize"))
{
nodeSize = config.getPropInt("nodeSize");
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this is an example of some code that is made more complicated by having the helper.

helper->setIndexMeta("_nodeSize", std::to_string(nodeSize));
}

if (config.hasProp("noSeek"))
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does it make sense to expose these options? I tried to match was exposed to ECL

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this should be defaulting to true - it should be true for blob storage, and doesn't really harm to be true for other systems.

{
bool noSeek = config.getPropBool("noSeek");
helper->setIndexMeta("_noSeek", noSeek ? "true" : "false");
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

trivial: can use boolToStr

if (noSeek)
flags |= TRAILING_HEADER_ONLY;
}

if (config.hasProp("useTrailingHeader"))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This should always default on.

{
bool useTrailingHeader = config.getPropBool("useTrailingHeader");
helper->setIndexMeta("_useTrailingHeader", useTrailingHeader ? "true" : "false");
if (useTrailingHeader)
flags |= USE_TRAILING_HEADER;
else
flags &= ~USE_TRAILING_HEADER;
}

size32_t fileposSize = hasTrailingFileposition(helper->queryDiskRecordSize()->queryTypeInfo()) ? sizeof(offset_t) : 0;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Throw an error if it has a trailing fileposition - will require changes elsewhere.

if (isVariable)
{
if (helper->getFlags() & TIWmaxlength)
maxDiskRecordSize = helper->getMaxKeySize();
else
maxDiskRecordSize = KEYBUILD_MAXLENGTH; // Current default behaviour, could be improved in the future
}
else
maxDiskRecordSize = helper->queryDiskRecordSize()->getFixedSize()-fileposSize;

if (maxDiskRecordSize > KEYBUILD_MAXLENGTH)
throw MakeStringException(99, "Index maximum record length (%d) exceeds 32k internal limit", maxDiskRecordSize);

rowBuffer.allocateN(maxDiskRecordSize, true);
prevRowBuffer.allocateN(maxDiskRecordSize, true);
Copy link

Copilot AI Jul 29, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The prevRowBuffer is allocated with the full maxDiskRecordSize but only helper->getKeyedSize() bytes are used in the comparison. Consider allocating only the needed size for the keyed portion to reduce memory usage, especially for records with large non-keyed portions.

Suggested change
prevRowBuffer.allocateN(maxDiskRecordSize, true);
prevRowBuffer.allocateN(helper->getKeyedSize(), true);

Copilot uses AI. Check for mistakes.

openFileStream();
builder.setown(createKeyBuilder(iFileIOStream.get(), flags, maxDiskRecordSize, nodeSize, helper->getKeyedSize(), 0, helper.get(), compression.c_str(), true, false));
}

~CRemoteIndexWriteActivity()
{
Copy link

Copilot AI Jul 29, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The destructor performs complex operations including calling builder->finish() which could potentially throw exceptions. Destructors should not throw exceptions as this can lead to undefined behavior. Consider moving the finish() logic to a separate cleanup method that can be called explicitly before destruction.

Suggested change
{
{
try
{
cleanup();
}
catch (...)
{
// Log the exception or handle it appropriately
// Avoid propagating exceptions from the destructor
}
close();
}
void cleanup()
{

Copilot uses AI. Check for mistakes.
if (builder != nullptr && helper != nullptr)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

there is no alternative at the moment, but when the file is closed (StreamCmd::CLOSE), it should call through to the acitivity to close, so we don't depend on dtor's to do this kind of work.

For now, it would be worth aadding a try/catch - as any unhandled exception at this point (within a dtor) will cause the process to exit.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Related to this, I think you are going to need to serialize back the last row, so that when the client has finished writing all parts of an index, it can use those last parts to create the TLK.
The response from StreamCmd::CLOSE could be extended to return structured info, that container this serialize row data.

{
Owned<IPropertyTree> metadata;
metadata.setown(createPTree("metadata", ipt_fast));
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

trivial: ^ could be on 1 line : Owned metadata = createPTree("metadata");

not worth diverging away from default to specify ipt_fast in this case, it's the default anyway.

buildUserMetadata(metadata, *helper);

metadata->setProp("_record_ECL", helper->queryRecordECL());
setRtlFormat(*metadata, helper->queryDiskRecordSize());

unsigned int fileCrc;
builder->finish(metadata, &fileCrc, maxRecordSizeSeen, nullptr);
}

close();
}

virtual void write(size32_t sz, const void *rowData) override
{
size32_t rowOffset = 0;
while(rowOffset < sz)
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Need to handle partial records here.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think it should be illegal to write partial records to this function. Otherwise you have some notable complications - the call to find the row size needs protecting if the row is partial.
For the moment throw an error if rowOffset > sz

{
const RtlRecord& inputRecordAccessor = inMeta->queryRecordAccessor(true);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

could be done once and stored as member.

size32_t rowSize = inputRecordAccessor.getRecordSize(rowData);
processRow((const byte *)rowData + rowOffset, rowSize);
rowOffset += rowSize;
}
}

virtual void serializeCursor(MemoryBuffer &tgt) const override {}
virtual void restoreCursor(MemoryBuffer &src) override {}
};

// create a { unsigned8 } output meta for the count
static const RtlIntTypeInfo indexCountFieldType(type_unsigned|type_int, 8);
Expand Down Expand Up @@ -2924,6 +3174,11 @@ IRemoteActivity *createRemoteActivity(IPropertyTree &actNode, bool authorizedOnl
activity.setown(new CRemoteDiskWriteActivity(actNode, fileDesc));
break;
}
case TAKindexwrite:
{
activity.setown(new CRemoteIndexWriteActivity(actNode, fileDesc));
break;
}
default: // in absense of type, read is assumed and file format is auto-detected.
{
const char *action = actNode.queryProp("action");
Expand Down Expand Up @@ -4938,13 +5193,20 @@ class CRemoteFileServer : implements IRemoteFileServer, public CInterface
* {
* "format" : "binary",
* "command": "newstream"
* "replyLimit" : "64",
* "replylimit" : "64",
* "compressed" : "LZW", // Default, LZW, ROW, INPLACE:*
* "nodeSize" : 32768,
* "noSeek" : false, // if true don't add the header that allows seeking
* "node" : {
* "kind" : "indexwrite",
* "fileName": "examplefilename",
* "filename": "examplefilename",
* "input" : {
* "f1" : "string",
* "f2" : "string"
* },
* "output" : {
* "f1" : "string5",
* "f2" : "string5"
* "f2" : "string5"
* }
* }
* }
Expand Down
7 changes: 6 additions & 1 deletion system/jlib/jstring.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2699,7 +2699,12 @@ void toLower(std::string & value)
std::transform(value.cbegin(), value.cend(), value.begin(), func);
}


// Remove leading and trailing whitespace from value, in place.
// Whitespace is whatever std::isspace() reports (space, tab, newline, etc.).
void trim(std::string & value)
{
    auto notSpace = [](unsigned char ch) { return !std::isspace(ch); };
    // strip leading whitespace
    value.erase(value.begin(), std::find_if(value.begin(), value.end(), notSpace));
    // strip trailing whitespace (previously missing — the function only trimmed the front)
    value.erase(std::find_if(value.rbegin(), value.rend(), notSpace).base(), value.end());
}

StringBuffer & ncnameEscape(char const * in, StringBuffer & out)
{
Expand Down
1 change: 1 addition & 0 deletions system/jlib/jstring.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -588,6 +588,7 @@ inline StringBuffer& operator << (StringBuffer& s, const TValue& value)
}

extern jlib_decl void toLower(std::string & value);
extern jlib_decl void trim(std::string & value);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could do with a comment — what does it do? It looks like it only trims leading whitespace, not trailing.


extern jlib_decl bool checkUnicodeLiteral(char const * str, unsigned length, unsigned & ep, StringBuffer & msg);
extern jlib_decl void decodeCppEscapeSequence(StringBuffer & out, const char * in, bool errorIfInvalid);
Expand Down
Loading