Node.js process exiting in the middle, with no error (using streams)

I'm writing a Lambda function that is given a list of text files on S3, concatenates them, and then zips the resulting file. For some reason, the function is bombing out in the middle of the process, with no errors.

The payload sent to the Lambda function looks like this:

{
  "sourceFiles": [
    "s3://bucket/largefile1.txt",
    "s3://bucket/largefile2.txt"
  ],
  "destinationFile": "s3://bucket/concat.zip",
  "compress": true,
  "omitHeader": false,
  "preserveSourceFiles": true
}


The scenarios in which this function works totally fine:




  1. The two files are small, and compress === false

  2. The two files are small, and compress === true

  3. The two files are large, and compress === false


If I try to have it compress two large files, it quits in the middle. The concatenation process itself works fine, but when it tries to use zip-stream to add the stream to an archive, it fails.



The two large files together are 483,833 bytes. When the Lambda function fails, it reads either 290,229 or 306,589 bytes (it's random) then quits.
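
As a debugging aid, here is a minimal sketch of the extra listeners that can surface a stream error that would otherwise be swallowed. It refers to the stream and archive objects from the handler shown below and is an illustration only, not part of the deployed code:

// Sketch only: extra listeners to surface errors that otherwise vanish.
// `stream` is the readable returned by concatCsvFiles; `archive` is the
// zip-stream instance created in the compress branch of the handler.
stream.on('error', err => console.error('source stream error:', err));
stream.on('close', () => console.log('source stream closed'));

archive.on('error', err => console.error('archive error:', err));
archive.on('end', () => console.log('archive stream ended'));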



This is the main entry point of the function:



const packer = require('zip-stream');
const S3 = require('aws-sdk/clients/s3');
const s3 = new S3({ apiVersion: '2006-03-01' });
const { concatCsvFiles } = require('./csv');
const { s3UrlToParts } = require('./utils');

function addToZip(archive, stream, options) {
  return new Promise((resolve, reject) => {
    archive.entry(stream, options, (err, entry) => {
      console.log('entry done', entry);
      if (err) reject(err);
      resolve(entry);
    });
  });
}

export const handler = async event => {
  /**
   * concatCsvFiles returns a readable stream to pass to either the archiver or
   * s3.upload.
   */
  let bytesRead = 0;

  try {
    const stream = await concatCsvFiles(event.sourceFiles, {
      omitHeader: event.omitHeader,
    });
    stream.on('data', chunk => {
      bytesRead += chunk.length;
      console.log('read', bytesRead, 'bytes so far');
    });
    stream.on('end', () => {
      console.log('this is never called :(');
    });
    const dest = s3UrlToParts(event.destinationFile);
    let archive;

    if (event.compress) {
      archive = new packer();

      await addToZip(archive, stream, { name: 'concat.csv' });
      archive.finalize();
    }

    console.log('uploading');
    await s3
      .upload({
        Body: event.compress ? archive : stream,
        Bucket: dest.bucket,
        Key: dest.key,
      })
      .promise();

    console.log('done uploading');

    if (!event.preserveSourceFiles) {
      const s3Objects = event.sourceFiles.map(s3Url => {
        const { bucket, key } = s3UrlToParts(s3Url);

        return {
          bucket,
          key,
        };
      });

      await s3
        .deleteObjects({
          Bucket: s3Objects[0].bucket,
          Delete: {
            Objects: s3Objects.map(s3Obj => ({ Key: s3Obj.key })),
          },
        })
        .promise();
    }

    console.log('## Never gets here');

    // return {
    //   newFile: event.destinationFile,
    // };
  } catch (e) {
    if (e.code) {
      throw new Error(e.code);
    }

    throw e;
  }
};


And this is the concatenation code:



import MultiStream from 'multistream';
import { Readable } from 'stream';
import S3 from 'aws-sdk/clients/s3';
import { s3UrlToParts } from './utils';

const s3 = new S3({ apiVersion: '2006-03-01' });

/**
 * Takes an array of S3 URLs and returns a readable stream of the concatenated results
 * @param {string[]} s3Urls Array of S3 URLs
 * @param {object} options Options
 * @param {boolean} options.omitHeader Omit the header from the final output
 */
export async function concatCsvFiles(s3Urls, options = {}) {
  // Get the header so we can use the length to set an offset in grabbing files
  const firstFile = s3Urls[0];
  const file = s3UrlToParts(firstFile);
  const data = await s3
    .getObject({
      Bucket: file.bucket,
      Key: file.key,
      Range: 'bytes=0-512', // first 512 bytes is pretty safe for header size
    })
    .promise();
  const streams = [];
  const [header] = data.Body.toString().split('\n');

  for (const s3Url of s3Urls) {
    const { bucket, key } = s3UrlToParts(s3Url);

    const stream = s3
      .getObject({
        Bucket: bucket,
        Key: key,
        Range: `bytes=${header.length + 1}-`, // +1 for newline char
      })
      .createReadStream();
    streams.push(stream);
  }

  if (!options.omitHeader) {
    const headerStream = new Readable();
    headerStream.push(header + '\n');
    headerStream.push(null);
    streams.unshift(headerStream);
  }

  const combinedStream = new MultiStream(streams);
  return combinedStream;
}









javascript node.js amazon-s3 aws-lambda node-streams






edited Nov 20 at 3:30
asked Nov 20 at 1:39

ffxsam

  • First of all, use process.on('uncaughtException', (err) => console.log(err)) to see if there is something wrong. If nothing shows up, I assume the process was killed because of OOM.
    – Sean
    Nov 20 at 1:50










  • OOM, as in, out of memory? The whole point of streaming is that it takes up minimal memory. I don't think that's it.
    – ffxsam
    Nov 20 at 2:09










  • No uncaught exceptions, just checked.
    – ffxsam
    Nov 20 at 2:10










  • archive.finalize(); I think this will drain the stream into a buffer, which takes memory, and the default Lambda memory limit is 128 MB. Try concat without compress.
    – Sean
    Nov 20 at 2:20










  • I've set the memory to 2GB, so memory is not the problem. And as I mentioned in my post, concat without zip compression works totally fine.
    – ffxsam
    Nov 20 at 3:23
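
Following up on Sean's first comment, here is a minimal sketch of the process-level hooks he suggests, plus the related unhandledRejection and beforeExit hooks that are useful when a Node.js process exits with no visible error. This is generic Node.js instrumentation, not code from the question:

// Sketch only: process-wide hooks for diagnosing a silent exit.
process.on('uncaughtException', err => {
  console.error('uncaught exception:', err);
});

process.on('unhandledRejection', reason => {
  console.error('unhandled rejection:', reason);
});

process.on('beforeExit', code => {
  // Fires when the event loop has drained and Node is about to exit.
  console.log('event loop drained, exiting with code', code);
});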


















1 Answer






Got it. The problem was actually with the zip-stream library. Apparently it doesn't work well with S3 + streaming. I tried yazl and it works perfectly.
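
For anyone hitting the same thing, a minimal sketch of what the compress branch can look like with yazl. This illustrates the general shape only, not the exact code from the handler above; the uploadCompressed name and dest argument are invented for the example, and s3 is the client already created in the handler module:

// Illustrative sketch only: swapping zip-stream for yazl in the compress branch.
const yazl = require('yazl');

async function uploadCompressed(stream, dest) {
  const zipfile = new yazl.ZipFile();

  // Add the concatenated CSV stream as a single entry in the archive.
  zipfile.addReadStream(stream, 'concat.csv');
  zipfile.end();

  // yazl exposes the finished archive as a readable stream, which
  // s3.upload accepts directly as the Body.
  await s3
    .upload({
      Body: zipfile.outputStream,
      Bucket: dest.bucket,
      Key: dest.key,
    })
    .promise();
}

The call site would replace the addToZip/finalize block and the s3.upload call, e.g. await uploadCompressed(stream, s3UrlToParts(event.destinationFile)).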






        answered Nov 20 at 4:31









        ffxsam
