I'm in the process of experimenting with Fine Uploader in order to implement it on our website. I really love the chunking and resume features, but I'm having trouble putting the files back together server side; the result is corrupt. After some investigation I found out that each chunk is 194 bytes too large, which makes the resulting file (number of chunks) × 194 bytes too large. Is this a known problem? If need be I will post my code. Thanks for your time.
EDIT: Here is my SSCCE. I forgot to specify that I'm using ASP.NET (C#).
The initialisation of the uploader on the webpage:
$(document).ready(function () {
    var manualuploader = new qq.FineUploader({
        element: $('#fine-uploader')[0],
        request: {
            endpoint: 'UploadHandler.ashx',
            forceMultipart: true
        },
        chunking: {
            enabled: true
        },
        resume: {
            enabled: true
        },
        retry: {
            enableAuto: true
        },
        callbacks: {
            onSubmit: function (id, fileName) {
                document.getElementById('triggerUpload').style.visibility = 'visible';
            }
        }
    });
});
And the server-side handler (C#):
<%@ WebHandler Language="C#" Class="UploadHandler" %>

using System;
using System.Web;

public class UploadHandler : IHttpHandler, System.Web.SessionState.IReadOnlySessionState
{
    private int completed;

    public void ProcessRequest(HttpContext context)
    {
        HttpRequest request = context.Request;

        string partIndex = request.Params["qqpartindex"];
        int totalParts = Convert.ToInt32(request.Params["qqtotalparts"]);
        String filename = request.Params["qqfilename"];
        String totalFileSizeName = request.Params["qqtotalfilesize"];

        string uploadedTemp = context.Server.MapPath("~/App_Data/" + "TEMP/");
        string uploadedLocation = context.Server.MapPath("~/App_Data/");

        string filePath = System.IO.Path.Combine(uploadedTemp, partIndex + ".tmp");
        if (!System.IO.File.Exists(filePath))
        {
            System.IO.Stream inputStream = request.InputStream;
            using (System.IO.FileStream fileStream = System.IO.File.OpenWrite(filePath))
            {
                inputStream.CopyTo(fileStream);
            }
        }

        completed = 0;
        if (partIndex.Equals(Convert.ToString(totalParts - 1))) // all chunks have arrived
        {
            mergeTempFiles(uploadedTemp, uploadedLocation, filename);
            completed = 1;
        }

        context.Response.ContentType = "application/json";
        context.Response.Write("{\"success\":true, \"completed\": " + completed + "}");
    }

    public bool IsReusable
    {
        get { return true; }
    }

    public void mergeTempFiles(string pathOrigin, string pathToSave, string filename)
    {
        string[] tmpfiles = System.IO.Directory.GetFiles(pathOrigin, "*.tmp");

        if (!System.IO.Directory.Exists(pathToSave))
        {
            System.IO.Directory.CreateDirectory(pathToSave);
        }

        System.IO.FileStream outPutFile = new System.IO.FileStream(pathToSave + filename, System.IO.FileMode.Create, System.IO.FileAccess.Write);

        foreach (string tempFile in tmpfiles)
        {
            int bytesRead = 0;
            byte[] buffer = new byte[1024];

            System.IO.FileStream inputTempFile = new System.IO.FileStream(tempFile, System.IO.FileMode.OpenOrCreate, System.IO.FileAccess.Read);

            while ((bytesRead = inputTempFile.Read(buffer, 0, 1024)) > 0)
                outPutFile.Write(buffer, 0, bytesRead);

            inputTempFile.Close();
            //System.IO.File.Delete(tempFile);
        }

        outPutFile.Close();
    }
}
The problem was in the way I was reading the input stream for the individual chunks from the request object in my C# handler class. The raw request body is the entire multipart/form-data payload, so it still contains the part boundary and headers; that is where the extra bytes on each chunk came from. The file data alone is exposed through the uploaded file's own stream.
The way I was reading it:
System.IO.Stream inputStream = request.InputStream;
The way it should be read:
System.IO.Stream inputStream = request.Files[0].InputStream;
This Google Groups post suggested that the second way should only be used for IE, and the first way for all other browsers, but I found that it behaves like this in all browsers.
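For reference, here is a minimal sketch of the corrected chunk-saving part of ProcessRequest, assuming the chunk always arrives as the only file part in the multipart request (so request.Files[0] is the chunk):

// Save the current chunk to its temp file.
// Read from the uploaded file's stream, not the raw request body,
// so the multipart boundary and headers are not written to disk.
string filePath = System.IO.Path.Combine(uploadedTemp, partIndex + ".tmp");
if (!System.IO.File.Exists(filePath))
{
    System.IO.Stream inputStream = request.Files[0].InputStream; // file data only
    using (System.IO.FileStream fileStream = System.IO.File.OpenWrite(filePath))
    {
        inputStream.CopyTo(fileStream);
    }
}

With this change each .tmp file contains exactly the chunk's bytes, and the merged file comes out at the correct size.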