I am creating a flask application with JavaScript to save the live video streams to a file.
What I am trying to achieve here is that the video stream will be sent to the Flask application periodically (every 20 seconds). The first time, it will create a video file; after that, each new chunk needs to be merged into the existing file.
I am using SocketIO to transmit the video from JS.
`async function startCapture() {
  try {
    // Access the user's webcam (with echo cancellation / noise suppression on the mic)
    stream = await navigator.mediaDevices.getUserMedia({
      video: true,
      audio: { echoCancellation: true, noiseSuppression: true },
    });
    // Attach the stream to the video element
    video.srcObject = stream;
    // Create a new MediaRecorder instance to capture video chunks
    recorder = new MediaRecorder(stream);
    // Buffer each chunk; transmitVideoBlob() sends the buffered chunks
    // on its own timer. (The previous call to the undefined
    // transmitVideoChunk() threw a ReferenceError on every chunk.)
    recorder.ondataavailable = (e) => {
      chunks.push(e.data);
    };
    // Start recording with a timeslice (ms) so ondataavailable fires
    // periodically; without one it only fires when recording stops,
    // leaving the transmit interval with empty chunk arrays.
    recorder.start(1000);
    // Enable/disable buttons
    startButton.disabled = true;
    stopButton.disabled = false;
    // Start transmitting video chunks at the desired interval
    startTransmitting();
  } catch (error) {
    console.error('Error accessing webcam:', error);
  }
}`
`
// Package the buffered chunks into a single WebM blob, send it over
// the socket, then reset the buffer for the next interval.
function transmitVideoBlob() {
  const videoBlob = new Blob(chunks, { type: 'video/webm' });
  socket.emit('video_data', videoBlob);
  // Clear the chunks array
  chunks = [];
}

// Kick off periodic transmission of the buffered video data.
function startTransmitting() {
  const videoInterval = 20000; // Interval between transmissions in milliseconds
  videoIntervalId = setInterval(transmitVideoBlob, videoInterval);
}`
In Flask, I have created a function which will call create_videos. Its parameters are: video_path — the location to save the video; filename — the file name of the video; new_video_data_blob — the binary data received from JS.
def create_videos(video_path, filename, new_video_data_blob):
    """Append a received binary video chunk to ``video_path/filename``.

    On the first call the chunk simply becomes the final file; afterwards
    FFmpeg concatenates the existing file with the new chunk.

    video_path: directory in which to store the video files.
    filename: name of the final (accumulated) video file.
    new_video_data_blob: raw bytes of the newly received chunk.

    Returns the path of the final video file.
    """
    # Unique prefix avoids collisions if chunks arrive concurrently.
    chunk_filename = os.path.join(video_path, f"{uuid1()}_{filename}")
    final_filename = os.path.join(video_path, filename)
    out_final_filename = os.path.join(video_path, "out_" + filename)

    # Save the current video chunk to a file.
    with open(chunk_filename, "wb") as f:
        print("create file chunk ", chunk_filename)
        f.write(new_video_data_blob)

    if not os.path.exists(final_filename):
        # If the final video file doesn't exist, rename the current chunk file.
        os.rename(chunk_filename, final_filename)
    else:
        # The final file exists: concatenate it with the new chunk.
        # NOTE(review): the "concat:" protocol only works for a few stream
        # formats; for WebM the concat demuxer is the robust option.
        try:
            subprocess.run(
                [
                    "ffmpeg",
                    "-i",
                    f"concat:{final_filename}|{chunk_filename}",
                    "-c",
                    "copy",
                    "-y",
                    out_final_filename,
                ],
                # Raise on a non-zero exit so a failed merge never deletes
                # the existing recording below.
                check=True,
            )
            # subprocess.run is synchronous, so the output exists now;
            # no polling/sleep loop is needed. os.replace overwrites
            # atomically on POSIX.
            os.replace(out_final_filename, final_filename)
        except Exception as e:
            print(e)
        finally:
            # Remove the temporary chunk file whether or not the merge worked.
            if os.path.exists(chunk_filename):
                os.remove(chunk_filename)
    return final_filename
When I record as well using below code in JS
audio: { echoCancellation: true, noiseSuppression: true },
I get the following error.
[NULL @ 0x55e697e8c900] Invalid profile 5.
[webm @ 0x55e697ec3180] Non-monotonous DTS in output stream 0:0; previous: 37075, current: 37020; changing to 37075. This may result in incorrect timestamps in the output file.
[NULL @ 0x55e697e8d8c0] Error parsing Opus packet header.
Last message repeated 1 times
[NULL @ 0x55e697e8c900] Invalid profile 5.
[NULL @ 0x55e697e8d8c0] Error parsing Opus packet header.
But when I record video only, it will work fine.
How can I merge the new binary data to the existing video file?
It seems that I don't need to merge the videos at every interval. I can merge the videos every time the socket is disconnected. The steps I took to solve the issue are as follows.
When receiving data from sockets, I have called the "create_videos" function. For a single session i.e. until the socket is disconnected, the binary video can be appended to a binary video file and video file will work fine.
Note: When trying to append binary data after socket is reconnected, the new chunk of data is not reflected in the video although the video size is increased. (Not sure why)
def create_videos(video_path, filename, new_video_data_blob):
    """Append a binary video chunk to ``video_path/filename``.

    Append mode creates the file on the first write of a session, so all
    chunks of one socket session accumulate into a single playable file.

    video_path: directory in which to store the video file.
    filename: name of the session video file.
    new_video_data_blob: raw bytes of the newly received chunk.

    Returns the path of the session video file.
    """
    final_filename = os.path.join(video_path, filename)
    # The context manager flushes and closes the file on exit, so explicit
    # flush()/close() calls inside the block are redundant.
    with open(final_filename, "ab") as f:
        print("create file chunk ", final_filename)
        f.write(new_video_data_blob)
    return final_filename
Once a session (period between socket connection and disconnection) is over and socket gets disconnected, the following will occur.
If a file by name "session_1.webm" exists, it will execute the program to compress file (needed in my case) and call the "merge_videos" function
In the merge_videos function, if the existing file named 1.webm (the required file name) does not exist, it will simply rename the session file.
But if it does exist, it reads all the files in the directory (separated by IDs) — which will basically always contain 2 files — and sorts them so that the latest file comes last. This will create a "txt" file which holds the paths of the files for concatenation.
Then ffmpeg library is used to merge the actual files instead of directly adding chunks to the file.
@socketio.on("disconnect")
def disconnect():
    """Finalize the recording session when the client disconnects:
    rename the session file to a timestamped name, compress it, and
    merge it into the accumulated recording."""
    recordings_dir = os.path.join("media", "1", "1", "recordings")
    session_file = os.path.join(recordings_dir, "session_1.webm")
    # Timestamped name keeps finished sessions unique and sortable.
    timestamped_name = f"{str(int(time.time()))}.webm"
    if os.path.exists(session_file):
        timestamped_path = os.path.join(recordings_dir, timestamped_name)
        os.rename(session_file, timestamped_path)
        compressed_name = "comp_" + timestamped_name
        # Compress Video
        compress_video(
            timestamped_path,
            os.path.join(recordings_dir, compressed_name),
        )
        # Merge Videos after compression with existing video
        merge_videos(recordings_dir, "1.webm", compressed_name)
    print("Socket Connection Ended")
def merge_videos(video_path, existing_file_name, session_file_name):
    """Merge a finished session recording into the accumulated video.

    video_path: directory holding the recordings.
    existing_file_name: name of the accumulated video (e.g. "1.webm").
    session_file_name: name of the newly finished (compressed) session file.
    """
    session_file = os.path.join(video_path, session_file_name)
    final_filename = os.path.join(video_path, existing_file_name)
    out_final_filename = os.path.join(video_path, "out_" + session_file_name)
    if not os.path.exists(final_filename):
        # No accumulated video yet: the session file simply becomes it.
        os.rename(session_file, final_filename)
        return
    # Collect the .webm files in this directory only — os.listdir does not
    # recurse into subdirectories like os.walk did — and sort so the
    # latest session comes last. endswith() is safe for names with no dot
    # or multiple dots, unlike split(".")[1].
    videos_lis = sorted(f for f in os.listdir(video_path) if f.endswith(".webm"))
    list_path = os.path.join(video_path, "input.txt")
    # The concat demuxer reads its inputs from a text file; concatenating
    # WebM files directly produces "Non-monotonous DTS" errors.
    with open(list_path, "w") as f:
        for video in videos_lis:
            f.write(f"file '{video}'\n")
    try:
        subprocess.run(
            [
                "ffmpeg",
                "-f",
                "concat",
                "-i",
                list_path,
                "-y",
                out_final_filename,
            ],
            # Raise on failure so the originals are never deleted after a
            # botched merge.
            check=True,
        )
        os.remove(final_filename)
        os.remove(session_file)
        os.rename(out_final_filename, final_filename)
    except Exception as e:
        print(e)
    finally:
        # The listing file is temporary either way.
        os.remove(list_path)
    print("Success")
def compress_video(video_path, new_path):
    """Re-encode the video at ``video_path`` into ``new_path`` using a
    high CRF value (40) to shrink its size; the original file is removed
    only when FFmpeg exits successfully."""
    ffmpeg_cmd = [
        "ffmpeg",
        "-i",
        video_path,
        "-y",
        "-crf",
        "40",
        new_path,
    ]
    result = subprocess.run(ffmpeg_cmd)
    # Guard clause: bail out early on a non-zero exit code.
    if result.returncode != 0:
        print("An error occurred while executing the command.")
        return
    os.remove(video_path)
    print("Command executed successfully.")
Note: If I do not create a txt file and directly concat 2 files using ffmpeg, I get the following error. "Non-monotonous DTS in output stream 0:0 This may result in incorrect timestamps in the output file."
So, this is my current solution to the problem