ぼかし、音

その辺のWEBを参考に音を出した。
 
 
import glob
import torch
import cv2
import numpy as np
import os
import threading
import queue
import datetime
import time
import re
import moviepy.editor as mp

# Google Drive folder layout used by this Colab script:
input_dir = '/content/drive/MyDrive/py_mosa/input/'    # source .mp4 files
split_dir = '/content/drive/MyDrive/py_mosa/work1/'    # raw split chunks
work_dir = '/content/drive/MyDrive/py_mosa/work2/'     # blurred chunks
output_dir = '/content/drive/MyDrive/py_mosa/output/'  # joined results

# Two-line resume checkpoint: input video path, then current chunk path.
checkPointFilePath = '/content/drive/MyDrive/py_mosa/checkpoint.txt'

# Frames per split chunk (kept small to fit Colab free-tier memory).
fileSplitSize = 250

def getCheckPoint():
    """Read the resume checkpoint file.

    Returns a two-element list [input_video, chunk_file]; entries stay
    None when the file is missing or has fewer than two lines.
    """
    # Fix: original was `ret = [NoneNone]` (paste dropped the comma),
    # which raises NameError on the first call.
    ret = [None, None]

    if not os.path.exists(checkPointFilePath):
        return ret
    # context manager guarantees the handle is closed
    with open(checkPointFilePath) as f:
        for i in range(2):
            line = f.readline()
            if line:
                ret[i] = line.replace('\n', '')
            else:
                break
    return ret

def putCheckPoint(inp1, inp2):
    """Persist the resume checkpoint.

    inp1: current input video path (always written as line 1).
    inp2: current chunk path, or None to omit the second line.

    Fix: the signature was mangled to `(inp1inp2)` — the comma between
    the two parameters is restored.
    """
    with open(checkPointFilePath, "w") as f:
        f.write(inp1)
        f.write("\n")
        if inp2 is not None:
            f.write(inp2)
            f.write("\n")

def del_workdir():
    """Remove every file from both scratch directories."""
    for directory in (split_dir, work_dir):
        for path in glob.glob(directory + "*"):
            os.remove(path)

def movie_split(input_video, out_video):
    """Split input_video into chunks of fileSplitSize frames each.

    Chunks are written to split_dir as "<chunk#>_<out_video>".
    Fix: the signature was mangled to `(input_videoout_video)` — the
    comma between the two parameters is restored.
    """
    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc(*'mp4v')
    fnum = 1   # chunk number, embedded in the filename
    writer = cv2.VideoWriter(split_dir + str(fnum) + "_" + out_video, fmt, float(frame_rate), (w, h))
    fchkp = 0  # frames written to the current chunk
    while video.isOpened():
        ret, frame = video.read()
        if not ret:
            break
        # roll over to a new chunk once the current one is full
        if fchkp >= fileSplitSize:
            writer.release()
            fnum += 1
            writer = cv2.VideoWriter(split_dir + str(fnum) + "_" + out_video, fmt, float(frame_rate), (w, h))
            fchkp = 0
        writer.write(frame)
        fchkp += 1
    writer.release()
    video.release()

def atoi(text):
    """Return int(text) when the string is all digits, else text unchanged."""
    if text.isdigit():
        return int(text)
    return text

def natural_keys(text):
    """Sort key that orders embedded numbers numerically ("2" before "10")."""
    return [atoi(token) for token in re.split(r'(\d+)', text)]

def movie_join(input_video, out_video):
    """Concatenate processed chunks from work_dir into output_dir/out_video.

    input_video is opened only to copy its geometry and frame rate.
    Fix: the signature was mangled to `(input_videoout_video)` — the
    comma between the two parameters is restored.
    """
    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc(*'mp4v')
    video.release()
    writer = cv2.VideoWriter(output_dir + out_video, fmt, float(frame_rate), (w, h))

    # natural sort keeps "2_x.mp4" ahead of "10_x.mp4"
    for f in sorted(glob.glob(work_dir + "*"), key=natural_keys):
        video = cv2.VideoCapture(f)
        while video.isOpened():
            ret, frame = video.read()
            if not ret:
                break
            writer.write(frame)
        video.release()

    writer.release()

def mosaic(img):
    """Blur *img* with a 30x30 box filter.

    Fix: the kernel tuple was mangled to `(3030)` — a single int, which
    is not a valid ksize for cv2.blur; the intended kernel (the
    surrounding post shows `cv2.blur(img, (30, 30))`) is restored.
    """
    return cv2.blur(img, (30, 30))

def movie_mosaic(input_video, out_video):
    """Blur detected objects of class 0 or 2 in every frame of input_video.

    Uses the module-level YOLOv5 `model`; writes the result to out_video.
    Fix: the signature was mangled to `(input_videoout_video)` — the
    comma between the two parameters is restored.
    """
    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(out_video, fmt, float(frame_rate), (w, h))
    while video.isOpened():
        ret, frame = video.read()
        if not ret:
            break
        results = model(frame)
        # hoisted: the original rebuilt results.pandas().xyxy[0] on every access
        det = results.pandas().xyxy[0]
        for j in range(len(det)):
            if det["class"][j] == 0 or det["class"][j] == 2:
                ymin = int(det["ymin"][j])
                ymax = int(det["ymax"][j])
                xmin = int(det["xmin"][j])
                xmax = int(det["xmax"][j])
                # blur the detected bounding box in place
                frame[ymin:ymax, xmin:xmax] = mosaic(frame[ymin:ymax, xmin:xmax])
        writer.write(frame)
    writer.release()
    video.release()

def set_audio(srcfile, imgfile, outfile):
    """Copy the audio track of srcfile onto the silent video imgfile,
    writing the combined result to outfile."""
    audio_path = '/content/drive/MyDrive/py_mosa/output/audio.mp3'
    # pull the audio out of the original input
    mp.VideoFileClip(srcfile).audio.write_audiofile(audio_path)
    # remux it onto the mosaicked video
    clip = mp.VideoFileClip(imgfile).subclip()
    clip.write_videofile(outfile, audio=audio_path)

if __name__ == '__main__':
    # Fix: torch.hub.load was called with 'ultralytics/yolov5''yolov5x' —
    # adjacent string literals concatenate into one bogus repo argument;
    # the missing comma is restored.
    model = torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True, force_reload=True)

    chkPoint1, chkPoint2 = getCheckPoint()
    files = sorted(glob.glob(input_dir + "*.mp4"), key=natural_keys)
    for f in files:
        input_video = f
        # resume: skip inputs finished before the last interruption
        if chkPoint1 is not None:
            if chkPoint1 == input_video:
                chkPoint1 = None
            else:
                print("skip1", input_video)
                continue

        if chkPoint2 is None:
            putCheckPoint(input_video, None)

        out_video_fname = os.path.basename(f)
        if chkPoint2 is None:
            # fresh input: clear scratch dirs and split it into chunks
            del_workdir()
            movie_split(input_video, out_video_fname)
        queF = queue.Queue()
        files2 = sorted(glob.glob(split_dir + "*.mp4"), key=natural_keys)
        for f2 in files2:
            # resume: skip chunks already mosaicked
            if chkPoint2 is not None:
                if chkPoint2 == f2:
                    chkPoint2 = None
                else:
                    print("skip2", f2)
                    continue
            queF.put(f2)
        del files2

        # process chunks one at a time, checkpointing after each
        while True:
            print(input_video, "queF.unfinished_tasks", queF.unfinished_tasks)
            if queF.unfinished_tasks == 0:
                break
            wInput_video = queF.get()
            wOut_video = work_dir + os.path.basename(wInput_video)
            movie_mosaic(wInput_video, wOut_video)
            putCheckPoint(input_video, wInput_video)
            queF.task_done()

        movie_join(input_video, out_video_fname)
        # Fix: replace(".mp4""_audio.mp4") passed ONE concatenated argument
        # (".mp4_audio.mp4"), a no-op replace; the intended call is
        # replace(".mp4", "_audio.mp4").
        set_audio(input_video, output_dir + out_video_fname,
                  (output_dir + out_video_fname).replace(".mp4", "_audio.mp4"))

    # everything done: clean scratch dirs and drop the checkpoint
    del_workdir()
    os.remove(checkPointFilePath)
    del model


img = cv2.blur(img, (30, 30))でぼかした。

モザイクだったところをimg = cv2.blur(img, (30, 30))でぼかした。

 

 

 

 

 

 

import glob
import torch
import cv2
import numpy as np
import os
import threading
import queue
import datetime
import time
import re

# Google Drive folder layout used by this Colab script:
input_dir = '/content/drive/MyDrive/py_mosa/input/'    # source .mp4 files
split_dir = '/content/drive/MyDrive/py_mosa/work1/'    # raw split chunks
work_dir = '/content/drive/MyDrive/py_mosa/work2/'     # blurred chunks
output_dir = '/content/drive/MyDrive/py_mosa/output/'  # joined results

# Two-line resume checkpoint: input video path, then current chunk path.
checkPointFilePath = '/content/drive/MyDrive/py_mosa/checkpoint.txt'

# Frames per split chunk (kept small to fit Colab free-tier memory).
fileSplitSize = 250

def getCheckPoint():
    """Read the resume checkpoint file.

    Returns a two-element list [input_video, chunk_file]; entries stay
    None when the file is missing or has fewer than two lines.
    """
    # Fix: original was `ret = [NoneNone]` (paste dropped the comma),
    # which raises NameError on the first call.
    ret = [None, None]

    if not os.path.exists(checkPointFilePath):
        return ret
    # context manager guarantees the handle is closed
    with open(checkPointFilePath) as f:
        for i in range(2):
            line = f.readline()
            if line:
                ret[i] = line.replace('\n', '')
            else:
                break
    return ret

def putCheckPoint(inp1, inp2):
    """Persist the resume checkpoint.

    inp1: current input video path (always written as line 1).
    inp2: current chunk path, or None to omit the second line.

    Fix: the signature was mangled to `(inp1inp2)` — the comma between
    the two parameters is restored.
    """
    with open(checkPointFilePath, "w") as f:
        f.write(inp1)
        f.write("\n")
        if inp2 is not None:
            f.write(inp2)
            f.write("\n")

def del_workdir():
    """Clear out both scratch directories (split chunks, then blurred chunks)."""
    for path in glob.glob(split_dir + "*") + glob.glob(work_dir + "*"):
        os.remove(path)

def movie_split(input_video, out_video):
    """Split input_video into chunks of fileSplitSize frames each.

    Chunks are written to split_dir as "<chunk#>_<out_video>".
    Fix: the signature was mangled to `(input_videoout_video)` — the
    comma between the two parameters is restored.
    """
    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc(*'mp4v')
    fnum = 1   # chunk number, embedded in the filename
    writer = cv2.VideoWriter(split_dir + str(fnum) + "_" + out_video, fmt, float(frame_rate), (w, h))
    fchkp = 0  # frames written to the current chunk
    while video.isOpened():
        ret, frame = video.read()
        if not ret:
            break
        # roll over to a new chunk once the current one is full
        if fchkp >= fileSplitSize:
            writer.release()
            fnum += 1
            writer = cv2.VideoWriter(split_dir + str(fnum) + "_" + out_video, fmt, float(frame_rate), (w, h))
            fchkp = 0
        writer.write(frame)
        fchkp += 1
    writer.release()
    video.release()

def atoi(text):
    """Digits-only strings become ints; anything else passes through."""
    if not text.isdigit():
        return text
    return int(text)

def natural_keys(text):
    """Key for natural sorting: digit runs compare as integers."""
    return list(map(atoi, re.split(r'(\d+)', text)))

def movie_join(input_video, out_video):
    """Concatenate processed chunks from work_dir into output_dir/out_video.

    input_video is opened only to copy its geometry and frame rate.
    Fix: the signature was mangled to `(input_videoout_video)` — the
    comma between the two parameters is restored.
    """
    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc(*'mp4v')
    video.release()
    writer = cv2.VideoWriter(output_dir + out_video, fmt, float(frame_rate), (w, h))

    # natural sort keeps "2_x.mp4" ahead of "10_x.mp4"
    for f in sorted(glob.glob(work_dir + "*"), key=natural_keys):
        video = cv2.VideoCapture(f)
        while video.isOpened():
            ret, frame = video.read()
            if not ret:
                break
            writer.write(frame)
        video.release()

    writer.release()

def mosaic(img):
    """Blur *img* with a 30x30 box filter.

    Fix: the kernel tuple was mangled to `(3030)` — a single int, which
    is not a valid ksize for cv2.blur; the intended kernel (the
    surrounding post shows `cv2.blur(img, (30, 30))`) is restored.
    """
    return cv2.blur(img, (30, 30))

def movie_mosaic(input_video, out_video):
    """Blur detected objects of class 0 or 2 in every frame of input_video.

    Uses the module-level YOLOv5 `model`; writes the result to out_video.
    Fix: the signature was mangled to `(input_videoout_video)` — the
    comma between the two parameters is restored.
    """
    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(out_video, fmt, float(frame_rate), (w, h))
    while video.isOpened():
        ret, frame = video.read()
        if not ret:
            break
        results = model(frame)
        # hoisted: the original rebuilt results.pandas().xyxy[0] on every access
        det = results.pandas().xyxy[0]
        for j in range(len(det)):
            if det["class"][j] == 0 or det["class"][j] == 2:
                ymin = int(det["ymin"][j])
                ymax = int(det["ymax"][j])
                xmin = int(det["xmin"][j])
                xmax = int(det["xmax"][j])
                # blur the detected bounding box in place
                frame[ymin:ymax, xmin:xmax] = mosaic(frame[ymin:ymax, xmin:xmax])
        writer.write(frame)
    writer.release()
    video.release()

if __name__ == '__main__':
    # Fix: torch.hub.load was called with 'ultralytics/yolov5''yolov5x' —
    # adjacent string literals concatenate into one bogus repo argument;
    # the missing comma is restored.
    model = torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True, force_reload=True)

    chkPoint1, chkPoint2 = getCheckPoint()
    files = sorted(glob.glob(input_dir + "*.mp4"), key=natural_keys)
    for f in files:
        input_video = f
        # resume: skip inputs finished before the last interruption
        if chkPoint1 is not None:
            if chkPoint1 == input_video:
                chkPoint1 = None
            else:
                print("skip1", input_video)
                continue

        if chkPoint2 is None:
            putCheckPoint(input_video, None)

        out_video_fname = os.path.basename(f)
        if chkPoint2 is None:
            # fresh input: clear scratch dirs and split it into chunks
            del_workdir()
            movie_split(input_video, out_video_fname)
        queF = queue.Queue()
        files2 = sorted(glob.glob(split_dir + "*.mp4"), key=natural_keys)
        for f2 in files2:
            # resume: skip chunks already mosaicked
            if chkPoint2 is not None:
                if chkPoint2 == f2:
                    chkPoint2 = None
                else:
                    print("skip2", f2)
                    continue
            queF.put(f2)
        del files2

        # process chunks one at a time, checkpointing after each
        while True:
            print(input_video, "queF.unfinished_tasks", queF.unfinished_tasks)
            if queF.unfinished_tasks == 0:
                break
            wInput_video = queF.get()
            wOut_video = work_dir + os.path.basename(wInput_video)
            movie_mosaic(wInput_video, wOut_video)
            putCheckPoint(input_video, wInput_video)
            queF.task_done()

        movie_join(input_video, out_video_fname)

    # everything done: clean scratch dirs and drop the checkpoint
    del_workdir()
    os.remove(checkPointFilePath)
    del model


nbch.hatenadiary.org

 

Rate exceeded がでるので、スレッドは止めてみた・・・

 

import glob
import torch
import cv2
import numpy as np
import os
import threading
import queue
import datetime
import time
import re

# Google Drive folder layout used by this Colab script:
input_dir = '/content/drive/MyDrive/py_mosa/input/'    # source .mp4 files
split_dir = '/content/drive/MyDrive/py_mosa/work1/'    # raw split chunks
work_dir = '/content/drive/MyDrive/py_mosa/work2/'     # mosaicked chunks
output_dir = '/content/drive/MyDrive/py_mosa/output/'  # joined results

# Two-line resume checkpoint: input video path, then current chunk path.
checkPointFilePath = '/content/drive/MyDrive/py_mosa/checkpoint.txt'

# Frames per split chunk (kept small to fit Colab free-tier memory).
fileSplitSize = 250

def getCheckPoint():
    """Load the two-line resume checkpoint; missing entries stay None."""
    ret = [None, None]

    if not os.path.exists(checkPointFilePath):
        return ret
    f = open(checkPointFilePath)
    for i in range(2):
        line = f.readline()
        if not line:
            break
        ret[i] = line.replace('\n', '')
    f.close()
    return ret

def putCheckPoint(inp1, inp2):
    """Write the checkpoint file: inp1, then inp2 when it is not None."""
    f = open(checkPointFilePath, "w")
    pieces = [inp1, "\n"]
    if inp2 is not None:
        pieces += [inp2, "\n"]
    for piece in pieces:
        f.write(piece)
    f.close()

def del_workdir():
    """Empty both scratch directories (split chunks and mosaicked chunks)."""
    for target in (split_dir, work_dir):
        for path in glob.glob(target + "*"):
            os.remove(path)

def movie_split(input_video, out_video):
    """Split input_video into chunks of fileSplitSize frames each,
    written to split_dir as "<chunk#>_<out_video>".
    """

    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (w,h)  # NOTE(review): unused
    frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)  # NOTE(review): unused
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')  # mp4v codec
    fnum = 1   # chunk number, embedded in the output filename
    writer = cv2.VideoWriter(split_dir + str(fnum) + "_" + out_video, fmt, float(frame_rate),(w,h))
    num = 0    # total frames copied (not otherwise used)
    fchkp = 0  # frames written to the current chunk
    while(video.isOpened()):
        ret,frame = video.read()
        if ret == True:
            # roll over to a new chunk once the current one is full
            if fchkp >= fileSplitSize:
                writer.release()
                fnum += 1
                writer = cv2.VideoWriter(split_dir + str(fnum) + "_" + out_video, fmt, float(frame_rate),(w,h))
                fchkp = 0
            else:
                pass

            writer.write(frame)
            num += 1
            fchkp += 1
        else:
            break
    writer.release()
    video.release()
    del writer
    del video

def atoi(text):
    """Convert all-digit strings to int; return anything else unchanged."""
    if text.isdigit():
        return int(text)
    return text

def natural_keys(text):
    """Sort key for human-style ordering: digit runs compare numerically."""
    parts = re.split(r'(\d+)', text)
    return [atoi(part) for part in parts]

def movie_join(input_video, out_video):
    """Concatenate every processed chunk in work_dir into output_dir/out_video.

    input_video is opened only to copy its geometry and frame rate.
    """
    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (w,h)  # NOTE(review): unused
    frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)  # NOTE(review): unused
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    video.release()
    writer = cv2.VideoWriter(output_dir + out_video, fmt, float(frame_rate),(w,h))

    # natural sort keeps "2_x.mp4" ahead of "10_x.mp4"
    files = sorted(glob.glob(work_dir + "*"), key=natural_keys)
    for f in files:
        video = cv2.VideoCapture(f)
        while(video.isOpened()):
            ret,frame = video.read()
            if ret == True:
                writer.write(frame)
            else:
                break
        video.release()
        del video

    writer.release()
    del writer

def mosaic(img, alpha):
    """Pixelate *img*: shrink by factor *alpha*, then scale back up with
    nearest-neighbour interpolation.

    Regions narrower or shorter than 20 px are returned untouched.
    """
    h, w = img.shape[0], img.shape[1]
    if w < 20 or h < 20:
        return img
    shrunk = cv2.resize(img, (int(w * alpha), int(h * alpha)))
    return cv2.resize(shrunk, (w, h), interpolation=cv2.INTER_NEAREST)

def movie_mosaic(input_video, out_video):
    """Pixelate detected objects (classes 0 and 2) in every frame.

    Uses the module-level YOLOv5 `model` and blur factor `alpha`
    (both set in the __main__ block); writes the result to out_video.
    """
    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (w,h)  # NOTE(review): unused
    frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)  # NOTE(review): unused
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    writer = cv2.VideoWriter(out_video, fmt, float(frame_rate),(w,h))
    num = 0  # frames processed (not otherwise used)
    while(video.isOpened()):
        ret,frame = video.read()
        if ret == True:
            results = model(frame)
            results.xyxy[0]  # NOTE(review): value discarded
            results.pandas().xyxy[0]  # NOTE(review): value discarded
            for j in range(len(results.pandas().xyxy[0])):

               # classes 0 and 2 — presumably COCO person/car; confirm intent
               if results.pandas().xyxy[0]["class"][j] == 0 or results.pandas().xyxy[0]["class"][j] == 2:
                    ymin = int(results.pandas().xyxy[0]["ymin"][j])
                    ymax = int(results.pandas().xyxy[0]["ymax"][j])
                    xmin = int(results.pandas().xyxy[0]["xmin"][j])
                    xmax = int(results.pandas().xyxy[0]["xmax"][j])
                    # pixelate the detected bounding box in place
                    frame[ymin:ymax,xmin:xmax] = mosaic(frame[ymin:ymax,xmin:xmax],  alpha)
               else:
                    pass
            writer.write(frame)
            num += 1
        else:
            break
    writer.release()
    video.release()
    del writer
    del video

if __name__ == '__main__':
    # YOLOv5x from torch hub; read as a global by movie_mosaic()
    model = torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True, force_reload=True)

    chkPoint1,chkPoint2 = getCheckPoint()
    alpha = 0.05  # pixelation factor; global, read by movie_mosaic()
    files = sorted(glob.glob(input_dir + "*.mp4"), key=natural_keys)
    for f in files:
        input_video = f
        # resume: skip inputs finished before the last interruption
        if chkPoint1 is not None:
            if chkPoint1 == input_video:
                chkPoint1 = None
            else:
                print("skip1", input_video)
                continue

        if chkPoint2 is None:
            putCheckPoint(input_video, None)

        out_video_fname = os.path.basename(f)
        if chkPoint2 is None:
            # fresh input: clear scratch dirs and split it into chunks
            del_workdir()
            movie_split(input_video, out_video_fname)
        queF = queue.Queue()
        files2 = sorted(glob.glob(split_dir + "*.mp4"), key=natural_keys)
        for f2 in files2:
            # resume: skip chunks already mosaicked
            if chkPoint2 is not None:
                if chkPoint2 == f2:
                    chkPoint2 = None
                else:
                    print("skip2", f2)
                    continue
            queF.put(f2)
        del files2

        # process chunks one at a time, checkpointing after each
        while True:

            print(input_video, "queF.unfinished_tasks", queF.unfinished_tasks)
            if queF.unfinished_tasks == 0:
                break

            wInput_video = queF.get()
            wOut_video = work_dir + os.path.basename(wInput_video)
            movie_mosaic(wInput_video, wOut_video)
            putCheckPoint(input_video, wInput_video)
            queF.task_done()

        movie_join(input_video, out_video_fname)

    # everything done: clean scratch dirs and drop the checkpoint
    del_workdir()
    os.remove(checkPointFilePath)
    del model

 

python初心者がcolabる

無料枠でメモリ間に合うようにせこく分割したりしてる。

何で音が消えるんだ?

 

 

import glob
import torch
import cv2
import numpy as np
import os
import threading
import queue
import datetime
import time
import re

# Google Drive folder layout used by this Colab script:
input_dir = '/content/drive/MyDrive/py_mosa/input/'    # source .mp4 files
split_dir = '/content/drive/MyDrive/py_mosa/work1/'    # raw split chunks
work_dir = '/content/drive/MyDrive/py_mosa/work2/'     # mosaicked chunks
output_dir = '/content/drive/MyDrive/py_mosa/output/'  # joined results

# Two-line resume checkpoint: input video path, then current chunk path.
checkPointFilePath = '/content/drive/MyDrive/py_mosa/checkpoint.txt'

thCnt = 4            # number of mosaic worker threads per batch
fileSplitSize = 250  # frames per split chunk (keeps Colab memory low)

# Load the model once up front — presumably so torch.hub's download/cache
# happens before each worker thread calls torch.hub.load itself.
modelFirst =torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True)
del modelFirst

def getCheckPoint():
    """Return [input_video, chunk] from the checkpoint file (None when absent)."""
    ret = [None, None]

    if not os.path.exists(checkPointFilePath):
        return ret
    f = open(checkPointFilePath)
    # read at most two lines; iteration simply stops at EOF
    for idx, line in zip(range(2), f):
        ret[idx] = line.replace('\n', '')
    f.close()
    return ret

def putCheckPoint(inp1, inp2):
    """Record progress: line 1 = input video, optional line 2 = current chunk."""
    f = open(checkPointFilePath, "w")
    f.write(inp1 + "\n")
    if inp2 is not None:
        f.write(inp2 + "\n")
    f.close()

def del_workdir():
    """Delete every file in split_dir and then in work_dir."""
    for pattern in (split_dir + "*", work_dir + "*"):
        for entry in glob.glob(pattern):
            os.remove(entry)

def movie_split(input_video, out_video):
    """Split input_video into chunks of fileSplitSize frames each,
    written to split_dir as "<chunk#>_<out_video>".
    """

    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (w,h)  # NOTE(review): unused
    frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)  # NOTE(review): unused
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')  # mp4v codec
    fnum = 1   # chunk number, embedded in the output filename
    writer = cv2.VideoWriter(split_dir + str(fnum) + "_" + out_video, fmt, float(frame_rate),(w,h))
    num = 0    # total frames copied (not otherwise used)
    fchkp = 0  # frames written to the current chunk
    while(video.isOpened()):
        ret,frame = video.read()
        if ret == True:
            # roll over to a new chunk once the current one is full
            if fchkp >= fileSplitSize:
                writer.release()
                fnum += 1
                writer = cv2.VideoWriter(split_dir + str(fnum) + "_" + out_video, fmt, float(frame_rate),(w,h))
                fchkp = 0
            else:
                pass

            writer.write(frame)
            num += 1
            fchkp += 1
        else:
            break
    writer.release()
    video.release()
    del writer
    del video

def atoi(text):
    """Return int(text) for all-digit strings, otherwise text itself."""
    if not text.isdigit():
        return text
    return int(text)

def natural_keys(text):
    """Natural-sort key: splits out digit runs so they compare as integers."""
    return [atoi(chunk) for chunk in re.split(r'(\d+)', text)]

def movie_join(input_video, out_video):
    """Concatenate every processed chunk in work_dir into output_dir/out_video.

    input_video is opened only to copy its geometry and frame rate.
    """
    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (w,h)  # NOTE(review): unused
    frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)  # NOTE(review): unused
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    video.release()
    writer = cv2.VideoWriter(output_dir + out_video, fmt, float(frame_rate),(w,h))

    # natural sort keeps "2_x.mp4" ahead of "10_x.mp4"
    files = sorted(glob.glob(work_dir + "*"), key=natural_keys)
    for f in files:
        video = cv2.VideoCapture(f)
        while(video.isOpened()):
            ret,frame = video.read()
            if ret == True:
                writer.write(frame)
            else:
                break
        video.release()
        del video

    writer.release()
    del writer

def mosaic(img, alpha):
    """Pixelate by downscaling by *alpha* and upscaling with INTER_NEAREST.

    Boxes smaller than 20 px in either dimension are left as-is.
    """
    height = img.shape[0]
    width = img.shape[1]
    if width < 20 or height < 20:
        return img
    shrunk = cv2.resize(img, (int(width * alpha), int(height * alpha)))
    return cv2.resize(shrunk, (width, height), interpolation=cv2.INTER_NEAREST)

def movie_mosaic(input_video, out_video):
    """Pixelate detected objects (classes 0 and 2) in one split chunk.

    Each call loads its own YOLOv5 instance, so every worker thread gets
    a private model; reads the global blur factor `alpha`.
    """
    model =torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True)
    video = cv2.VideoCapture(input_video)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (w,h)  # NOTE(review): unused
    frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)  # NOTE(review): unused
    frame_rate = video.get(cv2.CAP_PROP_FPS)
    fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    writer = cv2.VideoWriter(out_video, fmt, float(frame_rate),(w,h))
    num = 0  # frames processed (not otherwise used)
    while(video.isOpened()):
        ret,frame = video.read()
        if ret == True:
            results = model(frame)
            results.xyxy[0]  # NOTE(review): value discarded
            results.pandas().xyxy[0]  # NOTE(review): value discarded
            for j in range(len(results.pandas().xyxy[0])):

               # classes 0 and 2 — presumably COCO person/car; confirm intent
               if results.pandas().xyxy[0]["class"][j] == 0 or results.pandas().xyxy[0]["class"][j] == 2:
                    ymin = int(results.pandas().xyxy[0]["ymin"][j])
                    ymax = int(results.pandas().xyxy[0]["ymax"][j])
                    xmin = int(results.pandas().xyxy[0]["xmin"][j])
                    xmax = int(results.pandas().xyxy[0]["xmax"][j])
                    # pixelate the detected bounding box in place
                    frame[ymin:ymax,xmin:xmax] = mosaic(frame[ymin:ymax,xmin:xmax],  alpha)
               else:
                    pass
            writer.write(frame)
            num += 1
        else:
            break
    writer.release()
    video.release()
    del writer
    del video
    del model

class ThreadingMosaic(threading.Thread):
    """Worker thread that mosaics a single split chunk via movie_mosaic()."""

    def __init__(self, input_video, out_video):
        super().__init__()
        self.input_video = input_video
        self.out_video = out_video

    def run(self):
        # executed on start(): process this worker's chunk
        movie_mosaic(self.input_video, self.out_video)

    def inputfile(self):
        """Return the chunk path this worker was given."""
        return self.input_video

if __name__ == '__main__':
    chkPoint1,chkPoint2 = getCheckPoint()
    alpha = 0.05  # pixelation factor; global, read by movie_mosaic()
    files = sorted(glob.glob(input_dir + "*.mp4"), key=natural_keys)
    for f in files:
        input_video = f
        # resume: skip inputs finished before the last interruption
        if chkPoint1 is not None:
            if chkPoint1 == input_video:
                chkPoint1 = None
            else:
                print("skip1", input_video)
                continue

        if chkPoint2 is None:
            putCheckPoint(input_video, None)

        out_video_fname = os.path.basename(f)
        if chkPoint2 is None:
            # fresh input: clear scratch dirs and split it into chunks
            del_workdir()
            movie_split(input_video, out_video_fname)
        queF = queue.Queue()
        # slot table holding up to thCnt worker threads per batch
        thMap = {}
        for i in range(thCnt):
            thMap[str(i)] = None
        files2 = sorted(glob.glob(split_dir + "*.mp4"), key=natural_keys)
        for f2 in files2:
            # resume: skip chunks already mosaicked
            if chkPoint2 is not None:
                if chkPoint2 == f2:
                    chkPoint2 = None
                else:
                    print("skip2", f2)
                    continue
            queF.put(f2)
        del files2

        while True:

            print(input_video, "queF.unfinished_tasks", queF.unfinished_tasks)
            if queF.unfinished_tasks == 0:
                break

            # launch up to thCnt workers, one chunk each
            for i in range(thCnt):
                thMapKey = str(i)
                wInput_video = queF.get()
                wOut_video = work_dir + os.path.basename(wInput_video)
                threadingMosaic = ThreadingMosaic(wInput_video, wOut_video)
                thMap[thMapKey] = threadingMosaic;
                threadingMosaic.start()
                queF.task_done()
                if queF.unfinished_tasks == 0:
                    break

            # wait for the batch to finish.
            # NOTE(review): the checkpoint is written BEFORE join(), so a
            # crash mid-batch can record a chunk that never completed —
            # confirm this is acceptable for the resume logic.
            for i in range(thCnt):
                thMapKey = str(i)
                if thMap[str(i)] is not None:
                    putCheckPoint(input_video, thMap[str(i)].inputfile())
                    thMap[str(i)].join()

        movie_join(input_video, out_video_fname)

    # everything done: clean scratch dirs and drop the checkpoint
    del_workdir()
    os.remove(checkPointFilePath)