文章目錄
使用opencv打開攝像頭預備知識
- VideoCapture的官網api
- opencv io示意圖
- OpenCV 視頻I/O模塊是一組用於讀取和寫入視頻或圖像序列的類和功能。
打開筆記本自帶攝像頭代碼實戰
- 按q退出
import cv2
if __name__ == '__main__':
    # 0 selects the default (built-in) camera; use 1, 2, 3, ... if more
    # cameras are attached.
    cap = cv2.VideoCapture(0)
    while True:
        # ret is False when no frame could be read (camera unplugged, etc.)
        ret, frame = cap.read()
        if not ret:
            break  # avoid passing an empty frame to imshow
        # show the current frame: first arg is the window name, second the image
        cv2.imshow('frame', frame)
        # poll the keyboard for ~1 ms; quit when 'q' is pressed
        c = cv2.waitKey(1)
        if c == ord('q'):
            break
    cap.release()  # release the camera and close all windows
    cv2.destroyAllWindows()
- esc退出
import cv2 as cv
def video_demo():
    """Show live video from the default camera until Esc (code 27) is pressed."""
    # 0 is the camera index; with a single camera the default is 0
    capture = cv.VideoCapture(0)
    while (True):
        ref, frame = capture.read()
        cv.imshow("1", frame)
        # wait up to 30 ms per frame; mask to the low byte so the comparison
        # with 27 (Esc) works on platforms where waitKey returns > 8 bits
        c = cv.waitKey(30) & 0xff
        if c == 27:
            capture.release()
            break


video_demo()
cv.waitKey()
cv.destroyAllWindows()
代碼註釋
1、cv2.VideoCapture()函數:
cap = cv2.VideoCapture(0)
VideoCapture()中參數是0,表示打開筆記本的內置攝像頭。
cap = cv2.VideoCapture("../1.avi")
VideoCapture("../1.avi"),表示參數是視頻文件路徑則打開視頻。
2、cap.isOpened()函數:
返回true表示成功,false表示不成功
3、ret,frame = cap.read()函數:
cap.read()按幀讀取視頻,ret和frame是獲取cap.read()方法的兩個返回值。其中ret是布爾值:如果讀取幀正確則返回True;如果文件讀取到結尾,它的返回值就爲False。frame就是每一幀的圖像,是個三維矩陣。
4、cv2.waitKey()函數:
參數是1,表示延時1ms切換到下一幀圖像;參數過大如cv2.waitKey(1000),會因爲延時過久而感覺到卡頓。
參數爲0,如cv2.waitKey(0)只顯示當前幀圖像,相當於視頻暫停。
5、cap.release()與destroyAllWindows()函數:
cap.release()釋放視頻,調用destroyAllWindows()關閉所有圖像窗口。
6、c得到的是鍵盤輸入的ASCII碼,esc鍵對應的ASCII碼是27,即當按下esc鍵時if條件句成立。
- 使用第三方驅動程序或照相機注意事項
許多工業相機或某些視頻I/O設備不爲操作系統提供標準的驅動程序接口。因此,您不能在這些設備上使用VideoCapture或VideoWriter。
爲了訪問這些設備,製造商提供了自己的C++ API和庫,您必須將它們包含進來並與OpenCV應用程序鏈接。
該庫從存儲器緩衝區讀取/寫入圖像是一種常見情況。如果是這樣,則可以用Mat爲內存緩衝區(用戶分配的數據)創建頭,並使用OpenCV函數對其進行就地處理。有關更多詳細信息,請參見cv::Mat::Mat()。
opencv打開jeston tx2板載攝像頭
因爲查詢到tx1和tx2打開板載攝像頭的命令不一樣:其中arg1函數返回打開tx1的參數,arg2函數返回打開tx2的參數。這裏是參考一篇國外原文附帶的GitHub地址查詢到的(原文已找不到了)。我將GitHub的源碼附錄在最後(帶有作者個人信息),以表達對原作者的尊重。
import cv2
def arg1(width, height):
    """Build the GStreamer pipeline string for the Jetson TX1 onboard camera.

    The sensor is captured at 2592x1458 I420 @30fps and rescaled by
    nvvidconv to (width, height) BGRx frames delivered through an appsink.

    :param width: output frame width in pixels
    :param height: output frame height in pixels
    :return: pipeline string suitable for cv2.VideoCapture
    """
    gst_str = ('nvcamerasrc ! '
               'video/x-raw(memory:NVMM), '
               'width=(int)2592, height=(int)1458, '
               'format=(string)I420, framerate=(fraction)30/1 ! '
               'nvvidconv ! '
               'video/x-raw, width=(int){}, height=(int){}, '
               'format=(string)BGRx ! '
               'videoconvert ! appsink').format(width, height)
    return gst_str
def arg2(width, height):
    """Build the GStreamer pipeline string for the Jetson TX2 onboard camera.

    The sensor is captured at 1920x1080 NV12 @30fps, rotated 180 degrees
    (flip-method=2) and rescaled by nvvidconv to (width, height) BGRx frames
    delivered through an appsink.

    :param width: output frame width in pixels
    :param height: output frame height in pixels
    :return: pipeline string suitable for cv2.VideoCapture
    """
    gst_str = ('nvarguscamerasrc ! '
               'video/x-raw(memory:NVMM), '
               'width=(int)1920, height=(int)1080, '
               'format=(string)NV12, framerate=(fraction)30/1 ! '
               'nvvidconv flip-method=2 ! '
               'video/x-raw, width=(int){}, height=(int){}, '
               'format=(string)BGRx ! '
               'videoconvert ! appsink').format(width, height)
    return gst_str
def open_1():
    """Open the TX1 onboard camera via its GStreamer pipeline.

    Returns the cv2.VideoCapture even if opening failed; a diagnostic is
    printed in that case, and callers may re-check cap.isOpened().
    """
    path = arg1(100, 100)
    print("11111path", path)
    cap = cv2.VideoCapture(path)
    if not cap.isOpened():
        print('Failed to open camera!111111111111111111111')
    return cap
def open_2():
    """Open the TX2 onboard camera via its GStreamer pipeline.

    Returns the cv2.VideoCapture even if opening failed; a diagnostic is
    printed in that case, and callers may re-check cap.isOpened().
    """
    path = arg2(100, 100)
    print("2222path", path)
    cap = cv2.VideoCapture(path)
    if not cap.isOpened():
        print('Failed to open camera!2222222222222222')
    return cap
def openVideo(cap):
    """Display frames from an already-opened capture until 'q' is pressed.

    :param cap: a cv2.VideoCapture; released before this function returns
    """
    while True:
        ret, frame = cap.read()
        if not ret:
            break  # no frame available (camera failed or stream ended)
        # show the current frame: first arg is the window name, second the image
        cv2.imshow('frame', frame)
        # poll the keyboard for ~1 ms; quit when 'q' is pressed
        c = cv2.waitKey(1)
        if c == ord('q'):
            break
    cap.release()  # release the camera and close all windows
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Try both pipelines (TX1 and TX2); only one will actually work on a
    # given board. Switch the openVideo() call to use the other capture.
    cap_1 = open_1()
    cap_2 = open_2()
    openVideo(cap_1)
    # openVideo(cap_2)
附錄、GitHub打開攝像頭的源碼
# --------------------------------------------------------
# Camera sample code for Tegra X2/X1
#
# This program could capture and display video from
# IP CAM, USB webcam, or the Tegra onboard camera.
# Refer to the following blog post for how to set up
# and run the code:
# https://jkjung-avt.github.io/tx2-camera-with-python/
#
# Written by JK Jung <[email protected]>
# --------------------------------------------------------
import sys
import argparse
import subprocess
import cv2
# Name shared by all HighGUI calls that address the single display window.
WINDOW_NAME = 'CameraDemo'
def parse_args():
    """Parse command-line arguments selecting the camera source and size.

    :return: argparse.Namespace with use_rtsp, rtsp_uri, rtsp_latency,
             use_usb, video_dev, image_width and image_height
    """
    desc = 'Capture and display live camera video on Jetson TX2/TX1'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--rtsp', dest='use_rtsp',
                        help='use IP CAM (remember to also set --uri)',
                        action='store_true')
    parser.add_argument('--uri', dest='rtsp_uri',
                        help='RTSP URI, e.g. rtsp://192.168.1.64:554',
                        default=None, type=str)
    parser.add_argument('--latency', dest='rtsp_latency',
                        help='latency in ms for RTSP [200]',
                        default=200, type=int)
    parser.add_argument('--usb', dest='use_usb',
                        help='use USB webcam (remember to also set --vid)',
                        action='store_true')
    parser.add_argument('--vid', dest='video_dev',
                        help='device # of USB webcam (/dev/video?) [1]',
                        default=1, type=int)
    parser.add_argument('--width', dest='image_width',
                        help='image width [1920]',
                        default=1920, type=int)
    parser.add_argument('--height', dest='image_height',
                        help='image height [1080]',
                        default=1080, type=int)
    args = parser.parse_args()
    return args
def open_cam_rtsp(uri, width, height, latency):
    """Open an RTSP (IP camera) stream as a cv2.VideoCapture.

    H.264 depacketizing/decoding is done by the hardware omxh264dec element,
    then nvvidconv scales to (width, height) BGRx for OpenCV.

    :param uri: RTSP URI, e.g. rtsp://192.168.1.64:554
    :param width: output frame width in pixels
    :param height: output frame height in pixels
    :param latency: jitter-buffer latency in milliseconds
    """
    gst_str = ('rtspsrc location={} latency={} ! '
               'rtph264depay ! h264parse ! omxh264dec ! '
               'nvvidconv ! '
               'video/x-raw, width=(int){}, height=(int){}, '
               'format=(string)BGRx ! '
               'videoconvert ! appsink').format(uri, latency, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
    """Open a USB webcam (/dev/video<dev>) as a cv2.VideoCapture.

    A GStreamer v4l2src pipeline is used so the capture size can be forced;
    otherwise cv2.VideoCapture(dev) alone would suffice.

    :param dev: V4L2 device number (e.g. 1 for /dev/video1)
    :param width: requested frame width in pixels
    :param height: requested frame height in pixels
    """
    # We want to set width and height here, otherwise we could just do:
    # return cv2.VideoCapture(dev)
    gst_str = ('v4l2src device=/dev/video{} ! '
               'video/x-raw, width=(int){}, height=(int){} ! '
               'videoconvert ! appsink').format(dev, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_onboard(width, height):
    """Open the Jetson onboard CSI camera as a cv2.VideoCapture.

    Probes the installed GStreamer elements to pick the right source:
    nvcamerasrc on older L4T releases (TX1-era), nvarguscamerasrc on newer
    ones (TX2-era).

    :param width: output frame width in pixels
    :param height: output frame height in pixels
    :raises RuntimeError: if neither onboard camera source element exists
    """
    gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
    if 'nvcamerasrc' in gst_elements:
        # On versions of L4T prior to 28.1, add 'flip-method=2' into gst_str
        gst_str = ('nvcamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)2592, height=(int)1458, '
                   'format=(string)I420, framerate=(fraction)30/1 ! '
                   'nvvidconv ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    elif 'nvarguscamerasrc' in gst_elements:
        gst_str = ('nvarguscamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)1920, height=(int)1080, '
                   'format=(string)NV12, framerate=(fraction)30/1 ! '
                   'nvvidconv flip-method=2 ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    else:
        raise RuntimeError('onboard camera source not found!')
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_window(width, height):
    """Create and position the resizable display window at the top-left corner.

    :param width: initial window width in pixels
    :param height: initial window height in pixels
    """
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(WINDOW_NAME, width, height)
    cv2.moveWindow(WINDOW_NAME, 0, 0)
    cv2.setWindowTitle(WINDOW_NAME, 'Camera Demo for Jetson TX2/TX1')
def read_cam(cap):
    """Display frames from *cap* in WINDOW_NAME until the user quits.

    Keys: Esc quits, H/h toggles the help overlay, F/f toggles fullscreen.
    The loop also ends when the user closes the window.

    :param cap: an opened cv2.VideoCapture
    """
    show_help = True
    full_scrn = False
    help_text = '"Esc" to Quit, "H" for Help, "F" to Toggle Fullscreen'
    font = cv2.FONT_HERSHEY_PLAIN
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the window
            # If yes, terminate the program
            break
        _, img = cap.read()  # grab the next image frame from camera
        if show_help:
            # dark outline first, then light text, for readability on any frame
            cv2.putText(img, help_text, (11, 20), font,
                        1.0, (32, 32, 32), 4, cv2.LINE_AA)
            cv2.putText(img, help_text, (10, 20), font,
                        1.0, (240, 240, 240), 1, cv2.LINE_AA)
        cv2.imshow(WINDOW_NAME, img)
        key = cv2.waitKey(10)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # toggle help message
            show_help = not show_help
        elif key == ord('F') or key == ord('f'):  # toggle fullscreen
            full_scrn = not full_scrn
            if full_scrn:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)
            else:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_NORMAL)
def main():
    """Entry point: open the selected camera source, show video, clean up."""
    args = parse_args()
    print('Called with args:')
    print(args)
    print('OpenCV version: {}'.format(cv2.__version__))
    if args.use_rtsp:
        cap = open_cam_rtsp(args.rtsp_uri,
                            args.image_width,
                            args.image_height,
                            args.rtsp_latency)
    elif args.use_usb:
        cap = open_cam_usb(args.video_dev,
                           args.image_width,
                           args.image_height)
    else:  # by default, use the Jetson onboard camera
        cap = open_cam_onboard(args.image_width,
                               args.image_height)
    if not cap.isOpened():
        sys.exit('Failed to open camera!')
    open_window(args.image_width, args.image_height)
    read_cam(cap)
    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
附錄、c++打開攝像頭
#include <opencv.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main() {
    // Report the OpenCV version in use.
    cout << "the version of opencv is : " << CV_VERSION << endl;

    // Sanity check: make sure image I/O works before opening the video.
    Mat image = imread("lena.jpg");
    if (image.empty()) {
        printf("src is empty");
        return -1;
    }

    // Open the video file (equivalent to: VideoCapture capture("1.mp4");).
    VideoCapture capture;
    capture.open("1.mp4");

    // Read and display the video frame by frame until the stream ends.
    while (true) {
        Mat frame;
        capture >> frame;
        if (frame.empty()) {
            break;  // end of stream
        }
        imshow("frame by frame", frame);
        waitKey(10);  // ~10 ms per frame; also lets the window repaint
    }
    return 0;
}
附錄、c++打開圖片
#include <opencv.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include<opencv4/opencv2/core.hpp>
using namespace cv;
using namespace std;
int main() {
    // Load the input image and bail out if it could not be read.
    Mat src = imread("lena.jpg");
    if (src.empty()) {
        printf("src is empty");
        return -1;
    }

    // OpenCV creates and releases its own UI windows.
    // WINDOW_AUTOSIZE: sized to the image, not user-resizable.
    // WINDOW_NORMAL: resizable (useful with Qt builds).
    namedWindow("show image windos", WINDOW_AUTOSIZE);
    imshow("show image windos", src);

    // Colour-space conversion: BGR -> grayscale.
    namedWindow("change_image");
    Mat out_image;
    cvtColor(src, out_image, COLOR_BGR2GRAY);
    imshow("change_image", out_image);

    waitKey(0);
    return 0;
}
附錄c++、邊緣檢測
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main() {
    // Sanity check: make sure image decoding works at all.
    Mat image = imread("lena.jpg");
    if (image.empty()) {
        printf("src is empty");
        return -1;
    }

    // Open the default camera.
    VideoCapture capture(0);
    while (true) {
        Mat frame;
        capture >> frame;
        if (frame.empty()) {
            // Camera failed or stream ended; without this check cvtColor
            // would be called on an empty Mat and abort.
            break;
        }
        // Convert the frame to grayscale.
        cvtColor(frame, frame, COLOR_BGR2GRAY);
        // Denoise with a 7x7 box blur (the original comment said 3*3,
        // but the kernel actually used is Size(7, 7)).
        blur(frame, frame, Size(7, 7));
        // Canny edge detection.
        Canny(frame, frame, 0, 30, 3);
        imshow("frame by frame", frame);
        waitKey(10);
    }
    return 0;
}