I am trying to write a program that can convert HTTP URLs to torrents. The project basically works for download links containing small files — say, 1–500 MB. My problem is that if the file is larger than that, the app crashes or times out. I want to know how to fix this. Below is my code and a link to the GitHub repository.
https://github.com/savadks95/FireBit
// Dependencies. (http, getUrls are currently unused by the visible code but
// are kept in case other tooling relies on them being loaded.)
var http = require('http');
var webtorrentify = require('webtorrentify-link');
var fs = require('fs');
var url = require("url");
var path = require("path");
var validUrl = require('valid-url');
var express = require('express');
var getUrls = require('get-urls');
var remote = require('remote-file-size');

var app = express();

// NOTE(review): these module-level variables hold *per-request* state, so two
// overlapping requests clobber each other's values (e.g. one user can receive
// another user's fileName). They are kept for backward compatibility, but the
// route handler should use local variables instead.
var downloadLink;
var fileName;
var fileSize;
var parsed;

// Port: honour the platform-assigned PORT, fall back to 80 for local runs.
var port = process.env.PORT || 80;
// Browsers request /favicon.ico automatically. The original handler logged
// the hit but never responded, leaving every favicon request hanging until
// the client timed out; answer with 204 No Content instead.
app.get('/favicon.ico', function(req, res){
console.log('favicon request recived');
res.status(204).end();
});
// Catch-all route: serves the landing page on "/", rejects an empty query,
// and otherwise validates the submitted URL, checks the remote file size,
// and responds with a generated .torrent file.
app.get('*', function(req, res){
if(req.url==='/'){
fs.readFile('public/html/index.html', function (err, data) {
// Handle a missing/unreadable file, and always end the response —
// the original never called res.end() here, so "/" requests hung.
if (err) {
console.log('could not read index.html', err);
res.status(500).end();
return;
}
res.write(data);
res.end();
});
}else if (req.url === '/l?thelink='){
fs.readFile('public/html/emptyRequest.html', function (err, data) {
if (err) {
console.log('could not read emptyRequest.html', err);
res.status(500).end();
return;
}
res.write(data);
res.end();
});
}else{
//---------Reciving Url--------------------
// Keep all per-request state in LOCAL variables: the module-level
// globals are shared and get clobbered by concurrent requests.
var downloadLink = req.query.thelink;
console.log(downloadLink);
//-----------------------------------------
//------------checking for valid url-------
if (validUrl.isUri(downloadLink)) {
console.log('Looks like an URL');
//----------Extracting filename-------------
var parsed = url.parse(downloadLink);
var fileName = path.basename(parsed.pathname);
console.log(fileName);
//----------Finding File size----------------
remote(downloadLink, function(err, o) {
// Without this check a failed size lookup left o undefined,
// made fileSize NaN, and silently fell into the "too big" branch.
if (err) {
console.log('could not determine remote file size', err);
res.status(502).send('<h4> Could not determine remote file size </h4>');
return;
}
var fileSize = (o/1024)/1024; // bytes -> MB
console.log('size of ' + fileName + ' = ' + fileSize+" MB");
if (fileSize < 501)
{
///////////////Creating Torrent////////////////////
webtorrentify(downloadLink)
.then(function (buffer) {
console.log('creating the torrent');
res.setHeader('Content-Type', 'application/x-bittorrent');
res.setHeader('Content-Disposition', `inline; filename="${fileName}.torrent"`);
res.setHeader('Cache-Control', 'public, max-age=2592000'); // 30 days
// res.send() ends the response itself; the original's extra
// res.end() after send() was a write-after-end.
res.send(buffer);
console.log(fileName+'.torrent created');
})
.catch(function (torrentErr) {
// The original chain had no .catch(): a failed torrent build
// (e.g. on large files) crashed with an unhandled rejection
// or left the request hanging until timeout.
console.log('torrent creation failed', torrentErr);
res.status(500).send('<h4> Torrent creation failed </h4>');
});
////////////////////////////////////////////////
}
else{
console.log('More than 500 MB');
res.send("<h4> More than 500 MB or invalid URL </h4>");
}
});
}
else {
console.log('not url');
fs.readFile('public/html/404.html', function (err, data) {
if (err) {
console.log('could not read 404.html', err);
res.status(404).end();
return;
}
res.write(data);
res.end();
});
}
}
});
// Start the HTTP server on the configured port (PORT env var or 80).
app.listen(port);
console.log('server up and running', port);
Node.js’s built-in file (fs) and big-data handling functions usually fall a little short when handling files beyond 1 GB, but with just one extra NPM package, event-stream, you will be able to parse through a massive dataset without crashing the Node server. With the event-stream package, you can handle file sizes of 3 GB and above.
A detailed implementation is given in the link below; I would like to emphasize that the example implementation it describes solves your large-file download problem. As for converting HTTP URLs into a torrent stream, you already seem to handle that very well.
https://itnext.io/using-node-js-to-read-really-really-large-files-pt-1-d2057fe76b33
And here is the NPM package for the event-stream module.
https://www.npmjs.com/package/event-stream
Hope this helps.