getBlobToStream 方法来自旧的 azure-storage 包。在新的 @azure/storage-blob 包中,对应的方法是 download,下载得到的流通过响应的 readableStreamBody 属性获取。下面的代码使用 archiver 压缩下载流,并使用 fs 将结果保存为文件 "test.zip"。
/**
 * Downloads a single blob from Azure Blob Storage and saves it as a zip file.
 * Fill in the placeholder account/container/blob values before running.
 */
async function main() {
  const { BlobServiceClient, StorageSharedKeyCredential } = require("@azure/storage-blob");
  const STORAGE_ACCOUNT_NAME = "<your storage account name>";
  const ACCOUNT_ACCESS_KEY = "<your storage account key>";
  const containerName = "<your container name>";
  const blobName = "<your blob name>";
  const zipFilePath = "D:\\test.zip"; // path where the output zip file gets saved
  const credentials = new StorageSharedKeyCredential(STORAGE_ACCOUNT_NAME, ACCOUNT_ACCESS_KEY);
  const blobServiceClient = new BlobServiceClient(`https://${STORAGE_ACCOUNT_NAME}.blob.core.windows.net`, credentials);
  const containerClient = blobServiceClient.getContainerClient(containerName);
  const blobClient = containerClient.getBlobClient(blobName);
  const response = await blobClient.download(0); // download from offset 0
  // In the Node.js SDK the downloaded stream is exposed as `readableStreamBody`;
  // `blobDownloadStream` is not a property of the download response and would be undefined.
  await streamToCompressed(response.readableStreamBody, zipFilePath, blobName);
}
/**
 * Compresses a single readable stream into a zip file on disk.
 *
 * @param {NodeJS.ReadableStream} readableStream - stream with the blob's content.
 * @param {string} outputFilePath - destination path for the zip file.
 * @param {string} blobName - entry name inside the archive.
 * @returns {Promise<void>} resolves once the zip file descriptor has closed.
 */
async function streamToCompressed(readableStream, outputFilePath, blobName) {
  return new Promise((resolve, reject) => {
    const fs = require("fs");
    const archiver = require("archiver");
    // Create a file to stream archive data to. To stream directly into an
    // Express HTTP response instead, pipe into `res` rather than a file stream.
    const output = fs.createWriteStream(outputFilePath);
    const archive = archiver("zip", {
      zlib: { level: 9 }, // maximum compression
    });
    // 'close' fires only when a file descriptor is involved; at that point
    // the archive is finalized and fully flushed to disk.
    output.on("close", () => {
      console.log(archive.pointer() + " total bytes");
      console.log("archiver has been finalized and the output file descriptor has closed.");
      resolve();
    });
    // A write failure on the destination file must reject, otherwise the
    // promise would hang forever waiting for 'close'.
    output.on("error", reject);
    // Warnings cover stat failures and other non-blocking errors.
    archive.on("warning", (err) => {
      if (err.code === "ENOENT") {
        console.warn(err.message); // non-blocking: log and continue
      } else {
        // `throw` inside an event handler cannot reject this promise — it
        // becomes an uncaught exception. Reject explicitly instead.
        reject(err);
      }
    });
    archive.on("error", reject);
    readableStream.on("error", reject);
    // Pipe archive data to the file, add the blob stream as a single entry,
    // then finalize (no more entries; streams still have to finish).
    archive.pipe(output);
    archive.append(readableStream, { name: blobName });
    archive.finalize();
  });
}
// Run the sample and report the outcome on the console.
(async () => {
  try {
    await main();
    console.log('Done');
  } catch (ex) {
    console.log(ex.message);
  }
})();
更新:支持同时压缩多个文件:
/**
 * Downloads multiple blobs from Azure Blob Storage and saves them into one zip.
 * Fill in the placeholder account/container/blob values before running.
 */
async function main() {
  const { BlobServiceClient, StorageSharedKeyCredential } = require("@azure/storage-blob");
  const STORAGE_ACCOUNT_NAME = "<your storage account name>";
  const ACCOUNT_ACCESS_KEY = "<your storage account key>";
  const containerName = "<your container name>";
  const blobNames = ["blob 1 name", "blob 2 name"];
  const zipFilePath = "D:\\test.zip";
  const credentials = new StorageSharedKeyCredential(STORAGE_ACCOUNT_NAME, ACCOUNT_ACCESS_KEY);
  const blobServiceClient = new BlobServiceClient(`https://${STORAGE_ACCOUNT_NAME}.blob.core.windows.net`, credentials);
  const containerClient = blobServiceClient.getContainerClient(containerName);
  const streamDict = {}; // maps each blob name to its download stream
  // `for...of` iterates array values directly ( `for...in` yields string indices).
  for (const blobName of blobNames) {
    const blobClient = containerClient.getBlobClient(blobName);
    const response = await blobClient.download(0); // download from offset 0
    // The Node.js SDK exposes the stream as `readableStreamBody`;
    // `blobDownloadStream` is not a property of the download response.
    streamDict[blobName] = response.readableStreamBody;
  }
  await streamsToCompressed(streamDict, zipFilePath);
}
/**
 * Compresses several readable streams into a single zip file on disk.
 *
 * @param {Object<string, NodeJS.ReadableStream>} streamDict - map of entry name -> content stream.
 * @param {string} outputFilePath - destination path for the zip file.
 * @returns {Promise<void>} resolves once the zip file descriptor has closed.
 */
async function streamsToCompressed(streamDict, outputFilePath) {
  return new Promise((resolve, reject) => {
    const fs = require("fs");
    const archiver = require("archiver");
    // Create a file to stream archive data to. To stream directly into an
    // Express HTTP response instead, pipe into `res` rather than a file stream.
    const output = fs.createWriteStream(outputFilePath);
    const archive = archiver("zip", {
      zlib: { level: 9 }, // maximum compression
    });
    // 'close' fires only when a file descriptor is involved; at that point the
    // archive is finalized and fully flushed, so resolve HERE — resolving right
    // after finalize() (as the original did) returns to the caller before the
    // zip file has actually been written.
    output.on("close", () => {
      console.log(archive.pointer() + " total bytes");
      console.log("archiver has been finalized and the output file descriptor has closed.");
      resolve();
    });
    // A write failure on the destination file must reject, otherwise the
    // promise would hang forever waiting for 'close'.
    output.on("error", reject);
    // Warnings cover stat failures and other non-blocking errors.
    archive.on("warning", (err) => {
      if (err.code === "ENOENT") {
        console.warn(err.message); // non-blocking: log and continue
      } else {
        // `throw` inside an event handler cannot reject this promise — it
        // becomes an uncaught exception. Reject explicitly instead.
        reject(err);
      }
    });
    archive.on("error", reject);
    // Pipe archive data to the file, then append one entry per stream.
    archive.pipe(output);
    for (const blobName in streamDict) {
      const readableStream = streamDict[blobName];
      readableStream.on("error", reject);
      archive.append(readableStream, { name: blobName });
    }
    // Finalize: no more entries; streams still have to finish draining.
    archive.finalize();
  });
}
// Run the sample and report the outcome on the console.
(async () => {
  try {
    await main();
    console.log('Done');
  } catch (ex) {
    console.log(ex.message);
  }
})();