python, python-multithreading, permissionerror

PermissionError: [Errno 13] Permission denied: 'welcome2.mp3'


I am doing my final year project, "Driver Drowsiness System", in which I am using a Haar cascade and OpenCV. After it detects that the driver is drowsy or yawning, two different alarms go off, and for that I have used threading in Python. But after detecting a yawn twice, it shows an exception in the second thread. Please help me.

Code :

#python drowniness_yawn.py --webcam webcam_index

from scipy.spatial import distance as dist
from imutils.video import VideoStream
from imutils import face_utils
from threading import Thread
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
import os
import playsound as ps
from gtts import gTTS
def alarm(msg):  # code for alarm
    global alarm_status
    global alarm_status2
    global saying
    language = 'en'
   

    while alarm_status:
        print('call')
        s = msg+'"'
        myobj = gTTS(text=s, lang=language, slow=False)
        myobj.save("welcome.mp3")
        # os.system("mpg321 welcome.mp3")
        ps.playsound("welcome.mp3")
       

    if alarm_status2:
        print('call2')
        saying = True
        s =  msg + '"'
        # os.system(s)
        myobj = gTTS(text=s, lang=language, slow=False)
        myobj.save("welcome2.mp3")
        try:
             ps.playsound("welcome2.mp3")
           

        except:
            pass

       
        saying = False

def eye_aspect_ratio(eye):
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])

    C = dist.euclidean(eye[0], eye[3])

    ear = (A + B) / (2.0 * C)

    return ear

def final_ear(shape):
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    leftEye = shape[lStart:lEnd]
    rightEye = shape[rStart:rEnd]

    leftEAR = eye_aspect_ratio(leftEye)
    rightEAR = eye_aspect_ratio(rightEye)

    ear = (leftEAR + rightEAR) / 2.0
    return (ear, leftEye, rightEye)

def lip_distance(shape):
    top_lip = shape[50:53]
    top_lip = np.concatenate((top_lip, shape[61:64]))

    low_lip = shape[56:59]
    low_lip = np.concatenate((low_lip, shape[65:68]))

    top_mean = np.mean(top_lip, axis=0)
    low_mean = np.mean(low_lip, axis=0)

    distance = abs(top_mean[1] - low_mean[1])
    return distance


ap = argparse.ArgumentParser()
ap.add_argument("-w", "--webcam", type=int, default=0,
                help="index of webcam on system")
args = vars(ap.parse_args())

EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 30
YAWN_THRESH = 20
alarm_status = False
alarm_status2 = False
saying = False
COUNTER = 0

print("-> Loading the predictor and detector...")
#detector = dlib.get_frontal_face_detector()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")    #Faster but less accurate
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')


print("-> Starting Video Stream")
vs = VideoStream(src=args["webcam"]).start()
#vs= VideoStream(usePiCamera=True).start()       //For Raspberry Pi
time.sleep(1.0)

while True:

    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    #rects = detector(gray, 0)
    rects = detector.detectMultiScale(gray, scaleFactor=1.1, 
        minNeighbors=5, minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)

    #for rect in rects:
    for (x, y, w, h) in rects:
        rect = dlib.rectangle(int(x), int(y), int(x + w),int(y + h))
        
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        eye = final_ear(shape)
        ear = eye[0]
        leftEye = eye[1]
        rightEye = eye[2]

        distance = lip_distance(shape)

        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

        lip = shape[48:60]
        cv2.drawContours(frame, [lip], -1, (0, 255, 0), 1)

        if ear < EYE_AR_THRESH:
            COUNTER += 1

            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                if alarm_status == False:
                    alarm_status = True
                    t = Thread(target=alarm, args=('wake up sir',)) # Threading part for drowsiness alarm
                    t.daemon = True
                    t.start()

                cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        else:
            COUNTER = 0
            alarm_status = False

        if (distance > YAWN_THRESH):
                cv2.putText(frame, "Yawn Alert", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                if alarm_status2 == False and saying == False:
                    alarm_status2 = True
                    t = Thread(target=alarm, args=('take some fresh air sir',)) # Threading part for yawn alarm
                    t.daemon = True
                    t.start()
        else:
            alarm_status2 = False

        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "YAWN: {:.2f}".format(distance), (300, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)


    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    if key == ord("q"):
        break

cv2.destroyAllWindows()
vs.stop()

After executing it in the console I get a message which says:

> python .\drowsiness_yawn.py
-> Loading the predictor and detector...
-> Starting Video Stream
call2
call2
Exception in thread Thread-2 (alarm):
Traceback (most recent call last):
  File "C:\Users\avdhu\anaconda3\envs\project\Lib\threading.py", line 1038, in _bootstrap_inner
    self.run()
  File "C:\Users\avdhu\anaconda3\envs\project\Lib\threading.py", line 975, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\avdhu\OneDrive\Documents\Python Scripts\Drowsiness-and-Yawn-Detection-with-voice-alert-using-Dlib\drowsiness_yawn.py", line 39, in alarm
    myobj.save("welcome2.mp3")
  File "C:\Users\avdhu\anaconda3\envs\project\Lib\site-packages\gtts\tts.py", line 324, in save
    with open(str(savefile), "wb") as f:
         ^^^^^^^^^^^^^^^^^^^^^^^^^
PermissionError: [Errno 13] Permission denied: 'welcome2.mp3'

I get a PermissionError for "welcome2.mp3", the file used by the yawn alarm.

Please help me


Solution

  • The issue is that myobj.save("welcome.mp3") may be executed again before the previous execution of myobj.save("welcome.mp3") has finished.
    Executing myobj.save("welcome.mp3") twice in parallel raises the "Permission denied" error (the file is still open for writing when the second save tries to write it). The same applies to "welcome2.mp3", the file named in the traceback.

    The file gets saved again while it is still being saved because a new alarm thread is started while a previous alarm thread is still running.
    Executing myobj.save("welcome.mp3") inside a while loop increases the chance of hitting the issue.
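
    As a quick, platform-independent illustration of the race, here is a minimal sketch (all names in it are made up for the demo, not taken from the project): a shared "busy" set stands in for the MP3 file being held by one thread while another thread tries to use it, and a threading.Lock keeps only one thread inside the critical section at a time.

    import threading
    import time

    busy = set()                 # filenames currently "in use"
    lock = threading.Lock()      # serializes access to the shared file

    def save_and_play(name):     # hypothetical stand-in for gTTS.save + playsound
        with lock:               # critical section: one thread per file at a time
            if name in busy:     # without the lock, a second thread can land here
                raise PermissionError(13, "Permission denied", name)
            busy.add(name)
            time.sleep(0.2)      # pretend to write and play the MP3
            busy.discard(name)

    threads = [threading.Thread(target=save_and_play, args=("welcome2.mp3",))
               for _ in range(2)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print("both threads finished without a PermissionError")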

    Creating and starting a new thread multiple times is probably not a good practice, but fixing the design seems outside the scope of the question (for reference, see the sketch after this paragraph).
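
    One common alternative design is a single long-lived worker thread that consumes alarm messages from a queue; since only that one thread ever saves or plays MP3 files, no two threads can collide on them. A minimal sketch under that assumption (speak_worker, alarm_queue and alarm.mp3 are hypothetical names, not from the original code):

    import queue
    import threading

    import playsound as ps
    from gtts import gTTS

    alarm_queue = queue.Queue()

    def speak_worker():
        # The only thread that ever saves or plays MP3 files.
        while True:
            msg = alarm_queue.get()   # blocks until a message arrives
            if msg is None:           # sentinel value: shut the worker down
                break
            gTTS(text=msg, lang='en', slow=False).save("alarm.mp3")
            ps.playsound("alarm.mp3")

    worker = threading.Thread(target=speak_worker, daemon=True)
    worker.start()

    # The detection loop would then enqueue messages instead of starting threads:
    #     alarm_queue.put('wake up sir')
    #     alarm_queue.put('take some fresh air sir')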


    To prevent overlapping executions of myobj.save("welcome.mp3"), we may put the MP3 file saving inside a "critical section" using threading.Lock, as described in the following answer:

    from threading import Thread, Lock
    
    ...
    
    def alarm(msg):  # code for alarm
        global alarm_status_lock
    
        ...
        
        with alarm_status_lock:
            # Critical section is within this block.
            myobj.save("welcome.mp3")
            try:
                ps.playsound("welcome.mp3")
            except:
                pass       
    ...
    
    
    saying = False
    alarm_status_lock = Lock()  # Create global Lock object
    

    Note:
    I had to make some modifications to your code in order to execute it (the posted code is definitely not a minimal reproducible example).

    Here is the complete updated code, which seems to solve the issue:

    from scipy.spatial import distance as dist
    from imutils.video import VideoStream
    from imutils import face_utils
    from threading import Thread, Lock
    import numpy as np
    import argparse
    import imutils
    import time
    import dlib
    import cv2
    import os
    import playsound as ps
    from gtts import gTTS
    
    def alarm(msg):  # code for alarm
        global alarm_status
        global alarm_status2
        global saying
        global alarm_status_lock
        language = 'en'
       
        # Consider saving welcome.mp3 before the loop 
        #if alarm_status:
        #    s = msg+'"'
        #    myobj = gTTS(text=s, lang=language, slow=False)
        #    alarm_status_lock = Lock()
        #    with alarm_status_lock:  # https://stackoverflow.com/a/419213/4926757
        #        # Critical section is within this block.
        #        myobj.save("welcome.mp3")
    
    
        while alarm_status:
            print('call')
            s = msg+'"'
            myobj = gTTS(text=s, lang=language, slow=False)
            
            with alarm_status_lock:  # https://stackoverflow.com/a/419213/4926757
                # Critical section is within this block.
                myobj.save("welcome.mp3")
                # os.system("mpg321 welcome.mp3")
                try:
                    ps.playsound("welcome.mp3")
                except:
                    pass
    
           
    
        if alarm_status2:
            print('call2')
            saying = True
            s =  msg + '"'
            # os.system(s)
            myobj = gTTS(text=s, lang=language, slow=False)
    
            with alarm_status_lock:
                # Critical section is within this block.
                myobj.save("welcome2.mp3")
                try:
                     ps.playsound("welcome2.mp3")
                except:
                    pass
    
           
            saying = False
    
    def eye_aspect_ratio(eye):
        A = dist.euclidean(eye[1], eye[5])
        B = dist.euclidean(eye[2], eye[4])
    
        C = dist.euclidean(eye[0], eye[3])
    
        ear = (A + B) / (2.0 * C)
    
        return ear
    
    def final_ear(shape):
        (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
        (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
    
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
    
        ear = (leftEAR + rightEAR) / 2.0
        return (ear, leftEye, rightEye)
    
    def lip_distance(shape):
        top_lip = shape[50:53]
        top_lip = np.concatenate((top_lip, shape[61:64]))
    
        low_lip = shape[56:59]
        low_lip = np.concatenate((low_lip, shape[65:68]))
    
        top_mean = np.mean(top_lip, axis=0)
        low_mean = np.mean(low_lip, axis=0)
    
        distance = abs(top_mean[1] - low_mean[1])
        return distance
    
    
    ap = argparse.ArgumentParser()
    ap.add_argument("-w", "--webcam", type=int, default=0,
                    help="index of webcam on system")
    args = vars(ap.parse_args())
    
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 30
    YAWN_THRESH = 20
    alarm_status = False
    alarm_status2 = False
    saying = False
    alarm_status_lock = Lock()    # Create global Lock object
    COUNTER = 0
    
    print("-> Loading the predictor and detector...")
    #detector = dlib.get_frontal_face_detector()
    detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")    #https://stackoverflow.com/a/59717885/4926757
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    
    
    print("-> Starting Video Stream")
    #vs = VideoStream(src=args["webcam"]).start()
    #vs= VideoStream(usePiCamera=True).start()       //For Raspberry Pi
    cap = cv2.VideoCapture(args["webcam"])  # Use cv2.VideoCapture instead of VideoStream(src=args["webcam"]).start()
    time.sleep(1.0)
    
    while True:
    
        #frame = vs.read()
        ret, frame = cap.read()
        if not ret:
            break    # stop if no frame could be read from the camera
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    
        #rects = detector(gray, 0)
        rects = detector.detectMultiScale(gray, scaleFactor=1.1, 
            minNeighbors=5, minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE)
    
        #for rect in rects:
        for (x, y, w, h) in rects:
            rect = dlib.rectangle(int(x), int(y), int(x + w),int(y + h))
            
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
    
            eye = final_ear(shape)
            ear = eye[0]
            leftEye = eye[1]
            rightEye = eye[2]
    
            distance = lip_distance(shape)
    
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
    
            lip = shape[48:60]
            cv2.drawContours(frame, [lip], -1, (0, 255, 0), 1)
    
            if ear < EYE_AR_THRESH:
                COUNTER += 1
    
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    if alarm_status == False:
                        alarm_status = True
                        t = Thread(target=alarm, args=('wake up sir',)) # Threading part for drowsiness alarm
                        t.daemon = True
                        t.start()
    
                    cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    
            else:
                COUNTER = 0
                alarm_status = False
    
            if (distance > YAWN_THRESH):
                    cv2.putText(frame, "Yawn Alert", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    if alarm_status2 == False and saying == False:
                        alarm_status2 = True
                        t = Thread(target=alarm, args=('take some fresh air sir',)) # Threading part for yawn alarm
                        t.daemon = True
                        t.start()
            else:
                alarm_status2 = False
    
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "YAWN: {:.2f}".format(distance), (300, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    
    
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
    
        if key == ord("q"):
            break
    
    cv2.destroyAllWindows()
    cap.release()  # release the camera (the code uses cv2.VideoCapture, not VideoStream)