4 | 4 | // eslint-disable-next-line no-unused-vars |
5 | 5 | const { ContainerClient } = require('@azure/storage-blob') |
6 | 6 | const memoryCache = require('memory-cache') |
7 | | -const { Readable } = require('stream') |
8 | 7 | const URL = require('url') |
9 | 8 |
10 | 9 | class AzureStorageDocStore { |
@@ -41,11 +40,23 @@ class AzureStorageDocStore { |
41 | 40 | blobMetadata.extra = JSON.stringify(document._metadata.extra) |
42 | 41 | } |
43 | 42 | const options = { metadata: blobMetadata, blobHTTPHeaders: { blobContentType: 'application/json' } } |
44 | | - const dataStream = new Readable() |
45 | | - dataStream.push(JSON.stringify(document)) |
46 | | - dataStream.push(null) |
| 43 | + const data = JSON.stringify(document) |
47 | 44 | const blockBlobClient = this.containerClient.getBlockBlobClient(blobName) |
48 | | - await blockBlobClient.uploadStream(dataStream, 8 << 20, 5, options) |
| 45 | + |
| 46 | + // Use streaming for large documents (>100MB), direct upload for small ones |
| 47 | + const SIZE_THRESHOLD = 100 * 1024 * 1024 |
| 48 | + |
| 49 | + if (data.length > SIZE_THRESHOLD) { |
| 50 | + // Large documents: use streaming (note: still has multi-instance race condition risk) |
| 51 | + const { Readable } = require('stream') |
| 52 | + const dataStream = new Readable() |
| 53 | + dataStream.push(data) |
| 54 | + dataStream.push(null) |
| 55 | + await blockBlobClient.uploadStream(dataStream, 8 << 20, 5, options) |
| 56 | + } else { |
| 57 | + // Small documents: atomic upload (eliminates race conditions) |
| 58 | + await blockBlobClient.upload(data, Buffer.byteLength(data), options) |
| 59 | + } |
49 | 60 | return blobName |
50 | 61 | } |
51 | 62 |
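For reference, the two SDK calls this change switches between have different shapes: blockBlobClient.upload(body, contentLength, options) issues a single Put Blob request (contentLength is the body's byte length), while blockBlobClient.uploadStream(stream, bufferSize, maxConcurrency, options) stages blocks (here 8 MB each, up to 5 in flight) and then commits a block list. Below is a minimal standalone sketch of both paths; the connection-string variable, container name, and blob name are illustrative and not part of this change.

const { BlobServiceClient } = require('@azure/storage-blob')
const { Readable } = require('stream')

async function demo () {
  // Illustrative client setup; any ContainerClient/BlockBlobClient behaves the same way
  const service = BlobServiceClient.fromConnectionString(process.env.AZURE_STORAGE_CONNECTION_STRING)
  const blockBlobClient = service.getContainerClient('docs').getBlockBlobClient('example.json')

  const data = JSON.stringify({ hello: 'world' })
  const options = { blobHTTPHeaders: { blobContentType: 'application/json' } }

  // Small path: one Put Blob request; contentLength must be the byte length of the body
  await blockBlobClient.upload(data, Buffer.byteLength(data), options)

  // Large path: stage 8 MB blocks with up to 5 concurrent requests, then commit the block list
  await blockBlobClient.uploadStream(Readable.from([data]), 8 << 20, 5, options)
}

demo().catch(console.error)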