This section shows how to implement live-stream pushing with Python: a video is read with OpenCV and split into frames, and the whole process is explained in detail with example code.
Every task benefits from a clear plan, and writing code is no exception. The overall flow for pushing a live stream is as follows (a minimal end-to-end sketch appears right after the list):
Read the video with OpenCV
Split the video into frames
Process each frame (OpenCV template matching)
Write each processed frame into a pipe
Use FFmpeg to push the stream for live broadcast
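Before walking through each step, here is a minimal end-to-end sketch of that flow. It is only an illustration under simple assumptions: the RTMP address is a placeholder and the per-frame processing is left as a comment; the later sections fill in each part.

import subprocess as sp
import cv2

cap = cv2.VideoCapture(0)                                   # 1. read the video with OpenCV
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

command = ['ffmpeg', '-y',
           '-f', 'rawvideo', '-pix_fmt', 'bgr24',
           '-s', '{}x{}'.format(w, h), '-i', '-',
           '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
           '-f', 'flv', 'rtmp://example.com/live/stream']   # placeholder RTMP address
pipe = sp.Popen(command, stdin=sp.PIPE)                     # 5. ffmpeg pushes the stream

while cap.isOpened():
    ok, frame = cap.read()                                  # 2. split the video into frames
    if not ok:
        break
    # 3. process the frame here (template matching, shown below)
    pipe.stdin.write(frame.tobytes())                       # 4. write the frame into the pipe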
When processing a local video file there was no lag or stuttering, but with a real-time video stream the output stuttered and lagged. After some searching online, a multi-process producer/consumer approach was adopted (see the last section).
Reading video with OpenCV
#一個(gè)莫名其妙的python群:740322234
import cv2

def run_opencv_camera():
    # 0 opens the computer's default camera; a local video file path also works
    video_stream_path = 0
    cap = cv2.VideoCapture(video_stream_path)
    while cap.isOpened():
        is_opened, frame = cap.read()
        if not is_opened:          # end of file or camera error
            break
        cv2.imshow('frame', frame)
        cv2.waitKey(1)
    cap.release()
    cv2.destroyAllWindows()
OpenCV template matching
Template matching is one way of locating a specific target inside an image. The idea is simple: slide the template over every possible position in the image, compare how similar the template is to that region, and when the similarity is high enough, consider the target found. A small sketch that uses the matched coordinates directly follows the function below.
#一個(gè)莫名其妙的python群:740322234
import cv2
import numpy as np

# `template`, `category`, `Confidence`, `Precision`, `product_yield` and `result`
# are assumed to be defined elsewhere (the template image and the text labels to overlay).
def template_match(img_rgb):
    # convert to grayscale
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    # template matching
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    # similarity threshold
    threshold = 0.8
    loc = np.where(res >= threshold)
    if len(loc[0]):
        # a fixed region is drawn here instead of the matched coordinates
        cv2.rectangle(img_rgb, (155, 515), (1810, 820), (0, 0, 255), 3)
        cv2.putText(img_rgb, category, (240, 600), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(img_rgb, Confidence, (240, 640), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(img_rgb, Precision, (240, 680), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(img_rgb, product_yield, (240, 720), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(img_rgb, result, (240, 780), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 5)
    return img_rgb
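The function above draws a fixed rectangle whenever any location exceeds the threshold. As a complement (not part of the original article), here is a minimal sketch that draws the box at the best matching position instead, using cv2.minMaxLoc; 'template.png' and 'scene.png' are placeholder file names.

import cv2

# placeholder file names used only for illustration
template = cv2.imread('template.png', cv2.IMREAD_GRAYSCALE)
scene = cv2.imread('scene.png')

gray = cv2.cvtColor(scene, cv2.COLOR_BGR2GRAY)
res = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

if max_val >= 0.8:                            # same threshold as above
    h, w = template.shape
    top_left = max_loc                        # location of the best match
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(scene, top_left, bottom_right, (0, 0, 255), 3)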
Pushing the stream with FFmpeg
Install the Nginx-RTMP streaming media server on Ubuntu 14 (plenty of guides can be found online).
import subprocess as sp
import cv2 as cv

rtmpUrl = ""       # address of the RTMP server that will receive the stream
camera_path = ""   # camera index or path of the video source

cap = cv.VideoCapture(camera_path)

# Get video information
fps = int(cap.get(cv.CAP_PROP_FPS))
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))

# ffmpeg command: read raw BGR frames from stdin, encode with x264 and push as FLV over RTMP
command = ['ffmpeg',
           '-y',
           '-f', 'rawvideo',
           '-vcodec', 'rawvideo',
           '-pix_fmt', 'bgr24',
           '-s', "{}x{}".format(width, height),
           '-r', str(fps),
           '-i', '-',
           '-c:v', 'libx264',
           '-pix_fmt', 'yuv420p',
           '-preset', 'ultrafast',
           '-f', 'flv',
           rtmpUrl]

# pipe setup: ffmpeg reads whatever we write to its stdin
p = sp.Popen(command, stdin=sp.PIPE)

# read the camera
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        print("Opening camera failed")
        break

    # process frame
    # your code
    # process frame

    # write to pipe
    p.stdin.write(frame.tobytes())
Note: rtmpUrl is the address of the RTMP server that will receive the video; fill in whatever address the server set up above exposes.
Multi-process handling
#一個(gè)莫名其妙的python群:740322234
import time
import cv2
import multiprocessing as mp

def image_put(q):
    # use a local video for verification
    cap = cv2.VideoCapture("./new.mp4")
    # or use a live camera / video stream instead:
    # cap = cv2.VideoCapture(0)
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    if cap.isOpened():
        print('success')
    else:
        print('failed')
    while True:
        q.put(cap.read()[1])
        # if the consumer falls behind, drop the oldest frame so only fresh frames are kept
        q.get() if q.qsize() > 1 else time.sleep(0.01)

def image_get(q):
    while True:
        frame = q.get()
        frame = template_match(frame)   # per-frame processing from the section above
        cv2.imshow("frame", frame)
        cv2.waitKey(1)
        # pipe.stdin.write(frame.tobytes())   # or push the frame to the ffmpeg pipe instead

# run a single camera with two processes
def run_single_camera():
    # initialisation
    mp.set_start_method(method='spawn')  # init
    # queue shared by the producer and the consumer
    queue = mp.Queue(maxsize=2)
    processes = [mp.Process(target=image_put, args=(queue,)),
                 mp.Process(target=image_get, args=(queue,))]
    [process.start() for process in processes]
    [process.join() for process in processes]

def run():
    run_single_camera()  # quick, with 2 processes

if __name__ == '__main__':   # the 'spawn' start method requires this guard
    run()
This uses Python 3's built-in multiprocessing module (separate processes, not threads). A queue is created; process A reads each frame from the video stream over RTSP and puts it into the queue, while process B takes frames out of the queue, processes them and displays them. If process A finds two frames in the queue, meaning process B cannot keep up, process A drops the old frame so the queue always holds the freshest one.
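The commented-out pipe.stdin.write line in image_get hints at the final combination: instead of displaying frames, the consumer process can feed them straight into the FFmpeg pipe from the previous section. The sketch below is my own assembly of those two pieces, not code from the article; image_push and its parameters (rtmp_url, width, height, fps) are hypothetical names, and template_match is assumed to be importable by the child process.

import subprocess as sp
import multiprocessing as mp

def image_push(q, rtmp_url, width, height, fps):
    # same ffmpeg command as in the FFmpeg section
    command = ['ffmpeg', '-y',
               '-f', 'rawvideo', '-vcodec', 'rawvideo', '-pix_fmt', 'bgr24',
               '-s', "{}x{}".format(width, height), '-r', str(fps),
               '-i', '-',
               '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
               '-preset', 'ultrafast', '-f', 'flv', rtmp_url]
    pipe = sp.Popen(command, stdin=sp.PIPE)
    while True:
        frame = q.get()                    # newest frame from the producer
        frame = template_match(frame)      # per-frame processing (defined earlier)
        pipe.stdin.write(frame.tobytes())  # hand the raw BGR frame to ffmpeg

# usage (hypothetical values): replace image_get with image_push in run_single_camera, e.g.
# mp.Process(target=image_push, args=(queue, "rtmp://<server>/live/stream", 1920, 1080, 25))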