1. 首先需要的 Python 模块有:pyaudio、OpenCV、moviepy、Pillow、wave 等第三方库。
pyaudio 录制音频;
OpenCV 录制屏幕及个人录像;
moviepy 将录制的音频、录屏及录像文件同步合成。
2. 代码如下(借鉴自网络),可直接运行:
import wave
import threading
from os import remove, mkdir, listdir
from datetime import datetime
from time import sleep
import pyaudio
from PIL import ImageGrab
from numpy import array
import cv2
from moviepy.editor import *
import os
import win32api
CHUNK_SIZE = 1024
CHANNELS = 2
FORMAT = pyaudio.paInt16
RATE = 48000
allowRecording = True
event = threading.Event()
path = './video'
if not os.path.exists(path):
os.mkdir(path)
顯示畫面
def imshow(frame):
# color = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
cv2.imshow('v', frame)
cv2.waitKey(40)
def record_audio():
p = pyaudio.PyAudio()
#等待攝像頭啟動(dòng)好,然后大家一起等3秒開始錄制
event.wait()
sleep(3)
# 創(chuàng)建輸入流
stream = p.open(format=FORMAT, channels=CHANNELS,rate=RATE, input=True, frames_per_buffer=CHUNK_SIZE)
wf = wave.open(audio_filename, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
while allowRecording:
# 從錄音設(shè)備讀取數(shù)據(jù)辰狡, 直接寫入wav文件
data = stream.read(CHUNK_SIZE)
wf.writeframes(data)
wf.close()
stream.stop_stream()
stream.close()
p.terminate()
def record_screen():
# 錄制屏幕
# 等待攝像頭啟動(dòng)好锋叨,然后大家一起等3秒開始錄制
event.wait()
sleep(3)
im = ImageGrab.grab()
video = cv2.VideoWriter(screen_video_filename, cv2.VideoWriter_fourcc(*'XVID'), 25, im.size) # 偵速和視頻寬度、高度
while allowRecording:
im = ImageGrab.grab()
im = cv2.cvtColor(array(im), cv2.COLOR_RGB2BGR)
video.write(im)
video.release()
def record_webcam():
# 參數(shù)0表示筆記本自帶攝像頭
cap = cv2.VideoCapture(0)
# 啟動(dòng)好攝像頭宛篇,發(fā)出通知娃磺,大家一起等3表然后開始錄制
event.set()
sleep(3)
aviFile = cv2.VideoWriter(webcam_video_filename, cv2.VideoWriter_fourcc(*'MJPG'), 25, (640, 480))
while allowRecording and cap.isOpened():
# 捕捉當(dāng)前頭像, ret=True表示成功叫倍,F(xiàn)asle表示失敗
ret, frame = cap.read()
if ret:
aviFile.write(frame)
aviFile.release()
cap.release()
now = str(datetime.now())[:19].replace(":", "_")
audio_filename = f'./video/{now}.mp3'
webcam_video_filename = f'./video/t{now}.avi'
screen_video_filename = f'./video/tt{now}.avi'
video_filename = f'./video/{now}.avi'
創(chuàng)建兩個(gè)線程偷卧, 分別錄音與錄屏
t1 = threading.Thread(target=record_audio)
t2 = threading.Thread(target=record_screen)
t3 = threading.Thread(target=record_webcam)
創(chuàng)建時(shí)間,用戶多個(gè)線程同步吆倦,等攝像頭準(zhǔn)備以后再一起等3秒開始錄制
event.clear()
for t in (t1, t2, t3):
t.start()
等待攝像頭準(zhǔn)備好听诸,提示用戶3秒鐘以后開始錄制
event.wait()
print('3秒后開始錄制,按q鍵結(jié)束錄制')
while True:
if input() == 'q':
break
allowRecording = False
for t in (t1, t2, t3):
t.join()
把錄制的音頻和屏幕截圖合成視頻文件
audio = AudioFileClip(audio_filename)
video1 = VideoFileClip(screen_video_filename)
ratio1 = audio.duration/video1.duration
video1 = (video1.fl_time(lambda t: t/ratio1, apply_to=['video']).set_end(audio.duration))
video2 = VideoFileClip(webcam_video_filename)
ratio2 = audio.duration/video2.duration
video2 = (video2.fl_time(lambda t: t/ratio2, apply_to=['video'])).set_end(audio.duration).resize((320, 240)).set_position(('right','bottom'))
video = CompositeVideoClip([video1, video2]).set_audio(audio)
video.write_videofile(video_filename, codec='libx264', fps=25)
刪除歷史音頻文件和視頻
remove(audio_filename)
remove(screen_video_filename)
remove(webcam_video_filename)