FFmpeg: Reading H264 Frames from a USB Camera and Streaming over RTMP

After several days of experimenting, the whole chain finally works: FFmpeg reads H264 frames from a USB camera and pushes them out over RTMP. The FFmpeg version used is 4.0.2.
I won't cover building the FFmpeg sources on Ubuntu 16.04 here, as there are plenty of articles about that online. What I do want to point out is that FFmpeg's v4l2 wrapper apparently cannot select V4L2_PIX_FMT_H264 from among a camera's multiple output formats; it only captures the default output. I haven't fully figured this point out yet.
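(A lead for anyone who wants to dig into that point: the v4l2 demuxer documents an input_format option, the API counterpart of the CLI's -input_format h264. Through the API it would look roughly like the sketch below; I have not verified whether it actually selects V4L2_PIX_FMT_H264 here, so treat it as a pointer for further research rather than a working solution.)

//Untested sketch: asking libavdevice's v4l2 demuxer for H264 through its
//documented "input_format" option; requires avdevice_register_all() first.
AVInputFormat *v4l2_fmt = av_find_input_format("video4linux2");
AVDictionary *opts = NULL;
av_dict_set(&opts, "input_format", "h264", 0);   //CLI equivalent: -input_format h264
av_dict_set(&opts, "video_size", "1280x720", 0);
av_dict_set(&opts, "framerate", "30", 0);
AVFormatContext *ctx = NULL;
int err = avformat_open_input(&ctx, "/dev/video0", v4l2_fmt, &opts);
av_dict_free(&opts);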
So for now I had no choice but to write the V4L2 handling myself: capture the frames into memory, have FFmpeg read the data back out of memory, and finally push it over RTMP.
Here I owe a great deal to two of Lei Xiaohua's (雷神) blog posts:

Without those two posts I would have fumbled around for much longer. Sadly he is no longer with us, but his contributions keep benefiting us to this day.
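The heart of the workaround is FFmpeg's custom I/O: instead of opening a file or a socket, FFmpeg pulls its input by calling a read callback that we supply. Stripped of error handling, the wiring pattern that the full listing below implements looks like this (my_read stands in for the listing's read_buffer):

//Minimal custom-I/O sketch; the full code below adds the error handling.
static int my_read(void *opaque, uint8_t *buf, int buf_size); //fills buf with one H264 frame

unsigned char *iobuf = av_malloc(96 * 1024);          //I/O buffer handed to FFmpeg
AVIOContext *avio = avio_alloc_context(iobuf, 96 * 1024,
                                       0,             //0 = read-only context
                                       NULL,          //opaque pointer passed to the callback
                                       my_read,       //read callback
                                       NULL, NULL);   //no write/seek callbacks
AVFormatContext *ic = avformat_alloc_context();
ic->pb = avio;                                        //attach our I/O before opening
ic->flags |= AVFMT_FLAG_CUSTOM_IO;
avformat_open_input(&ic, NULL, NULL, NULL);           //probes the stream via my_read()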

Code

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <fcntl.h>
#include <malloc.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <libavutil/time.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>

#define DEV_TYPE		"video4linux2"
#define DEV_NAME		"/dev/video1"
#define MAX_CHANNEL 	(4)
#define AV_IO_BUF_SIZE	(96*1024)
#define CLEAR(x) 		memset(&(x), 0, sizeof(x))

struct buffer {
	void   *start;
	size_t length;
};

struct usbcamera_node
{
	int channel;
	char id[32];
	int usb_port;
	//V4L2
	char devname[32];
	int fd;
	struct v4l2_format fmt;
	struct v4l2_streamparm parm;
	struct v4l2_requestbuffers req;
	struct buffer *buffers;
	int n_buffers;
	int poll_index[MAX_CHANNEL];

};

struct usbcamera_node usbcamra;
struct pollfd usbcamra_poll_fd[MAX_CHANNEL];
nfds_t usbcamra_poll_fd_num = 0;
unsigned int frame_len = 0;
unsigned int frame_cnt = 0;

int avError(int errNum);

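// ioctl() wrapper that retries automatically when interrupted by a signal (EINTR)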
static int xioctl(int fh, int request, void *arg)
{
	int r;
	do
	{
		r = ioctl(fh, request, arg);
	} while (-1 == r && EINTR == errno);

	return r;
}

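// Open the device, query its capabilities, set the capture format and frame
// rate, and mmap the driver's buffers into user space.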
static int video_init(struct usbcamera_node *camera_node)
{
	struct v4l2_capability cap;
	struct v4l2_fmtdesc fmtdesc;
	int ret = 0;

	// open the video device with the API of open()
	camera_node->fd = open(camera_node->devname, O_RDWR | O_NONBLOCK, 0);

	if (-1 == camera_node->fd)
	{
		fprintf(stderr, "Cannot open '%s': %d, %s\n", camera_node->devname, errno, strerror(errno));
		return -1;
	}

	// inquire video device capability with the API of ioctl
	if (-1 == xioctl(camera_node->fd, VIDIOC_QUERYCAP, &cap))
	{
		fprintf(stderr, "%s is no V4L2 device\n", camera_node->devname);
		return -1;
	}

	// Set video device settings
	if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
	{
		fprintf(stderr, "%s is no video capture device\n", camera_node->devname);
		return -1;
	}

	if (!(cap.capabilities & V4L2_CAP_STREAMING))
	{
		fprintf(stderr, "%s does not support streaming i/o\n", camera_node->devname);
		return -1;
	}

	printf("\nVIDOOC_QUERYCAP\n");
	printf("the camera driver is: %s\n", cap.driver);
	printf("the camera card is: %s\n", cap.card);
	printf("the camera bus info is: %s\n", cap.bus_info);
	printf("the version is: %d\n", cap.version);
	printf("the capabilities is: 0x%x\n", cap.capabilities);
	printf("the device_caps is: 0x%x\n", cap.device_caps);

	CLEAR(fmtdesc);
	fmtdesc.index = 0; //format index to enumerate
	fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; //buffer type
	while (xioctl(camera_node->fd, VIDIOC_ENUM_FMT, &fmtdesc) != -1)
	{
		printf("VIDIOC_ENUM_FMT success! fmtdesc.index:%d, fmtdesc.type:%d, fmtdesc.flags:%d, "
			   "fmtdesc.description:%s, fmtdesc.pixelformat:%d\n",
			   fmtdesc.index, fmtdesc.type, fmtdesc.flags, fmtdesc.description, fmtdesc.pixelformat);
		fmtdesc.index ++;
	}

	if (-1 == xioctl(camera_node->fd, VIDIOC_S_FMT, &camera_node->fmt))
	{
		fprintf(stderr, "%s set fmt failed\n", camera_node->devname);
		return -1;
	}

	printf("VIDIOC_S_FMT success! width:%d, height:%d, pixelformat:%x, field:%d, bytesperline:%d, "
		   "sizeimage:%d, colorspace:%d, priv:%d, flags:%x, ycbcr_enc:%d, quantization:%d, xfer_func:%d\n",
		   camera_node->fmt.fmt.pix.width, camera_node->fmt.fmt.pix.height, camera_node->fmt.fmt.pix.pixelformat,
		   camera_node->fmt.fmt.pix.field, camera_node->fmt.fmt.pix.bytesperline, camera_node->fmt.fmt.pix.sizeimage,
		   camera_node->fmt.fmt.pix.colorspace, camera_node->fmt.fmt.pix.priv, camera_node->fmt.fmt.pix.flags,
		   camera_node->fmt.fmt.pix.ycbcr_enc, camera_node->fmt.fmt.pix.quantization, camera_node->fmt.fmt.pix.xfer_func);

	struct v4l2_streamparm parm = {0};
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	xioctl(camera_node->fd, VIDIOC_G_PARM, &parm);
	parm.parm.capture.timeperframe.numerator = 1;
	parm.parm.capture.timeperframe.denominator = camera_node->parm.parm.capture.timeperframe.denominator;
	ret = xioctl(camera_node->fd, VIDIOC_S_PARM, &parm);
	if(ret !=0 )
	{
		printf("line:%d parm set error, errno:%d, str:%s\n", __LINE__, errno, strerror(errno));
		return -1;
	}
	printf("fd %d ret %d set Frame rate %.3f fps\n", camera_node->fd, ret,
		   1.0 * parm.parm.capture.timeperframe.denominator / parm.parm.capture.timeperframe.numerator);

	// Request buffers from the V4L2 driver for memory mapping (MMAP)
	if (-1 == xioctl(camera_node->fd, VIDIOC_REQBUFS, &camera_node->req))
	{
		if (EINVAL == errno)
		{
			fprintf(stderr, "%s does not support memory mapping\n", "USBCAMERA");
			return -1;
		}
		else
		{
			return -1;
		}
	}

	// Make the buffers map to the user space
	for (camera_node->n_buffers = 0; camera_node->n_buffers < camera_node->req.count; ++camera_node->n_buffers)
	{
		struct v4l2_buffer buf;

		CLEAR(buf);

		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = camera_node->n_buffers;

		if (-1 == xioctl(camera_node->fd, VIDIOC_QUERYBUF, &buf))
		{
			ret = -1;
			break;
		}

		camera_node->buffers[camera_node->n_buffers].length = buf.length;
		camera_node->buffers[camera_node->n_buffers].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE ,MAP_SHARED, camera_node->fd, buf.m.offset);
		printf("mmap buffer index:%d buf %p length %d\n", camera_node->n_buffers, camera_node->buffers[camera_node->n_buffers].start, buf.length);

		if (MAP_FAILED == camera_node->buffers[camera_node->n_buffers].start)
		{
			ret = -1;
			break;
		}
	}
	if ((ret == -1) && (camera_node->n_buffers != 0))
	{
		int idx;
		// roll back: unmap only the buffers that were mapped before the failure
		for (idx = 0; idx < camera_node->n_buffers; idx++)
		{
			munmap(camera_node->buffers[idx].start, camera_node->buffers[idx].length);
			printf("munmap buffer index:%d buf %p length %ld\n",
				   idx, camera_node->buffers[idx].start,
				   camera_node->buffers[idx].length);
		}
		return -1;
	}

	return 0;
}

static int start_capturing(struct usbcamera_node *camera_node)
{
	unsigned int i;
	enum v4l2_buf_type type;
	int n_buffers = 0;

	n_buffers = camera_node->n_buffers;
	printf("start_capturing fd %d n_buffers %d\n", camera_node->fd, n_buffers);
	for (i = 0; i < n_buffers; ++i)
	{
		struct v4l2_buffer buf;

		CLEAR(buf);
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;

		if (-1 == xioctl(camera_node->fd, VIDIOC_QBUF, &buf))
		{
			printf("fd %d VIDIOC_QBUF faild\n", camera_node->fd);
			return -1;
		}
	}
	printf("fd %d VIDIOC_QBUF OK!\n", camera_node->fd);

	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (-1 == xioctl(camera_node->fd, VIDIOC_STREAMON, &type))
	{
		printf("fd %d VIDIOC_STREAMON faild\n", camera_node->fd);
		return -1;
	}
	printf("fd %d VIDIOC_STREAMON Ok!\n", camera_node->fd);
	return 0;
}

static int stop_capturing(struct usbcamera_node *camera_node)
{
	enum v4l2_buf_type type;

	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (-1 == xioctl(camera_node->fd, VIDIOC_STREAMOFF, &type))
	{
		printf("fd %d VIDIOC_STREAMOFF faild\n", camera_node->fd);
		return -1;
	}
	printf("fd %d VIDIOC_STREAMOFF Ok!\n", camera_node->fd);
	return 0;
}

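// Dequeue one filled buffer from the driver (VIDIOC_DQBUF), copy the encoded
// frame out, then immediately give the buffer back (VIDIOC_QBUF).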
static int read_frame(struct usbcamera_node *camera_node, unsigned char *pbuf, unsigned int ch, struct timeval *tvl)
{
	struct v4l2_buffer buf;
	int count = 0;
	int n_buffers = 0;

	n_buffers = camera_node->n_buffers;
	CLEAR(buf);
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	if (-1 == xioctl(camera_node->fd, VIDIOC_DQBUF, &buf))
	{
		switch (errno) {
		case EAGAIN:
			return 0;
		case EIO:
		/* Could ignore EIO, see spec. */
		/* fall through */
		default:
			{
				printf("VIDIOC_DQBUF faild\n");
				return -1;
			}
		}
	}

	if (buf.index >= (unsigned int)n_buffers)
	{
		printf("buf.index %d out of range, n_buffers %d\n", buf.index, n_buffers);
		return -1;
	}

	memcpy(pbuf, camera_node->buffers[buf.index].start, buf.bytesused);
	tvl->tv_sec = buf.timestamp.tv_sec;
	tvl->tv_usec = buf.timestamp.tv_usec;
	count = buf.bytesused;

	if (-1 == xioctl(camera_node->fd, VIDIOC_QBUF, &buf))
	{
		printf("VIDIOC_QBUF faild\n");
	}

	return count;
}

void free_camra_resource(struct usbcamera_node *camera_node)
{
	int cnt = 0;
	for(cnt = 0; cnt < camera_node->n_buffers; cnt++)
	{
		munmap(camera_node->buffers[cnt].start, camera_node->buffers[cnt].length);
		printf("munmap buffer index:%d buf %p length %ld\n",
			   cnt, camera_node->buffers[cnt].start,
			   camera_node->buffers[cnt].length);
	}
}


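// Read callback for the custom AVIOContext: block in poll() until the camera
// fd is readable, then hand one complete H264 frame to FFmpeg.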
int read_buffer(void *opaque, uint8_t *pbuf, int buf_size)
{
	struct timeval tvl;
	if(poll(usbcamra_poll_fd, usbcamra_poll_fd_num, -1) == -1)
	{
		printf("usbcamra poll failed !!!!!!!!!!!!!\n");
		return AVERROR_EXTERNAL;
	}

	if((usbcamra_poll_fd[0].revents & POLLERR) == POLLERR)
	{
		printf("usbcamra_poll_fd[0].revents 0x%x\n", usbcamra_poll_fd[0].revents);
		return AVERROR_EXTERNAL;
	}

	if (usbcamra_poll_fd[0].revents & POLLIN)	//bitwise test of revents
	{
		int len = read_frame(&usbcamra, pbuf, 0, &tvl);
		if (len < 0)
		{
			printf("read_frame failed\n");
			return AVERROR_EXTERNAL;
		}
		frame_len = (unsigned int)len;
		printf("frame_cnt:%d, frame_len:%d, tvl.tv_sec:%ld ", frame_cnt, frame_len, tvl.tv_sec);
		printf("%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x "
			   "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x \n",
			   pbuf[0],pbuf[1],pbuf[2],pbuf[3],pbuf[4],pbuf[5],pbuf[6],pbuf[7],pbuf[8],pbuf[9],pbuf[10],pbuf[11],
			   pbuf[12],pbuf[13],pbuf[14],pbuf[15],pbuf[16],pbuf[17],pbuf[18],pbuf[19],pbuf[20],pbuf[21],pbuf[22],
			   pbuf[23],pbuf[24],pbuf[25],pbuf[26],pbuf[27],pbuf[28],pbuf[29],pbuf[30],pbuf[31]);
	}
	frame_cnt++;
	usbcamra_poll_fd[0].revents = 0;

	if (frame_len > (unsigned int)buf_size)
	{
		printf("frame_len %u is bigger than buf_size %d, frame truncated\n", frame_len, buf_size);
		return buf_size;
	}
	return (int)frame_len;

}

//ffmpeg -f v4l2 -list_formats all -i /dev/video0
//Usage: ./ffmpeg_usb_rtmp /dev/video0 1280 720 30 1500000 (the trailing bitrate argument is accepted but not used yet)
int main(int argc, char* argv[])
{
    int videoindex = -1;
    unsigned int frame_rate = 0;
    //av_register_all and avformat_network_init must be called before any other FFmpeg call.
    //They register all muxers/demuxers (flv, mp4, mp3, mov, ...); encoders/decoders are registered separately.
    av_register_all();
    avformat_network_init();

    if (argc < 5)
	{
		usbcamra.fmt.fmt.pix.width = 1280;
		usbcamra.fmt.fmt.pix.height = 720;
		frame_rate = 30;
	}
	else
	{
		usbcamra.fmt.fmt.pix.width = atoi(argv[2]);
		usbcamra.fmt.fmt.pix.height = atoi(argv[3]);
		frame_rate = atoi(argv[4]);
	}

	//fall back to DEV_NAME when no device path is given on the command line
	snprintf(usbcamra.devname, sizeof(usbcamra.devname), "%s", (argc > 1) ? argv[1] : DEV_NAME);
	printf("width:%d, height:%d, dev:%s\n", usbcamra.fmt.fmt.pix.width, usbcamra.fmt.fmt.pix.height, usbcamra.devname);

	usbcamra.fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	usbcamra.fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
	usbcamra.fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;

	CLEAR(usbcamra.parm);
	usbcamra.parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	usbcamra.parm.parm.capture.timeperframe.numerator = 1;
	usbcamra.parm.parm.capture.timeperframe.denominator = frame_rate;
	CLEAR(usbcamra.req);
	usbcamra.req.count = 16;
	usbcamra.req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	usbcamra.req.memory = V4L2_MEMORY_MMAP;
	usbcamra.buffers = calloc(usbcamra.req.count, sizeof(struct buffer));
	if(!usbcamra.buffers)
	{
		fprintf(stderr, "calloc faild, errno:%d, str:%s\n", errno, strerror(errno));
		return -1;
	}

	video_init(&usbcamra);
	start_capturing(&usbcamra);
	usbcamra_poll_fd[0].fd = usbcamra.fd;
	usbcamra_poll_fd[0].events = POLLIN;
	usbcamra_poll_fd_num = 1;

    //the output (push) URL
    const char *outUrl = "rtmp://192.168.1.102:1935/live";
    
	//AVFormatContext **ps: the input demuxing context. It holds all format state and all I/O: file I/O for files, network I/O for streams
	AVFormatContext *ifmt_ctx = NULL;

	ifmt_ctx = avformat_alloc_context();
	unsigned char* inbuffer=NULL;
	inbuffer = (unsigned char*)av_malloc(AV_IO_BUF_SIZE);
	if(inbuffer == NULL)
	{
		avformat_free_context(ifmt_ctx);
		printf("line:%d av_malloc failed!\n", __LINE__);
		return -1;
	}
	AVIOContext *avio_in = avio_alloc_context(inbuffer, AV_IO_BUF_SIZE, 0, NULL, read_buffer, NULL, NULL);
	if(avio_in == NULL)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		printf("line:%d avio_alloc_context failed!\n", __LINE__);
		return -1;
	}

	ifmt_ctx->pb = avio_in;
	ifmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO;

	//open the input and parse the container/stream header
	int ret = avformat_open_input(&ifmt_ctx, NULL, NULL, NULL);
	if (ret < 0)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		avio_context_free(&avio_in);
		return avError(ret);
	}
	printf("avformat_open_input success!\n");

	ret = avformat_find_stream_info(ifmt_ctx, NULL);
	if (ret != 0)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		avio_context_free(&avio_in);
		return avError(ret);
	}
	//dump the input stream information
	//args: stream index to dump, URL to display (none here), 0 = this is an input
	av_dump_format(ifmt_ctx, 0, NULL, 0);

	AVFormatContext * ofmt_ctx = NULL;
	//for file output the "flv" muxer name could be inferred from the file name; for a network stream it must be given explicitly
	//create the output context
	ret = avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", outUrl);
	if (ret < 0)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		avio_context_free(&avio_in);
		avformat_free_context(ofmt_ctx);
		return avError(ret);
	}
	printf("avformat_alloc_output_context2 success!\n");
	printf("ifmt_ctx->nb_streams:%d\n", ifmt_ctx->nb_streams);
	//find the video stream in the input and remember its index
	unsigned int i;
	for (i = 0; i < ifmt_ctx->nb_streams; i++)
	{
		//the input stream
		AVStream *in_stream = ifmt_ctx->streams[i];
		if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			videoindex = i;
		}
		//add a stream to the output context (an empty audio/video stream container)
		AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
		if (!out_stream)
		{
			printf("failed to add a stream to the output context\n");
			avformat_free_context(ifmt_ctx);
			av_free((void*)inbuffer);
			avio_context_free(&avio_in);
			avformat_free_context(ofmt_ctx);
			return avError(AVERROR_UNKNOWN);
		}

		//copy the codec parameters from the input stream to the output stream
		//ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
		ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
		//ret = avcodec_parameters_from_context(out_stream->codecpar, in_stream->codec);
		//ret = avcodec_parameters_to_context(out_stream->codec, in_stream->codecpar);
		if (ret < 0)
		{
			printf("failed to copy codec parameters\n");
		}
		out_stream->codecpar->codec_tag = 0;

		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
			out_stream->codec->flags = out_stream->codec->flags | AV_CODEC_FLAG_GLOBAL_HEADER;
		}
	}

	printf("videoindex:%d\n", videoindex);
	av_dump_format(ofmt_ctx, 0, outUrl, 1);

	//open the output I/O
	ret = avio_open(&ofmt_ctx->pb, outUrl, AVIO_FLAG_WRITE);
	if (ret < 0)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		avio_context_free(&avio_in);
		avformat_free_context(ofmt_ctx);
		return avError(ret);
	}

	//write the container header
	ret = avformat_write_header(ofmt_ctx, 0);
	if (ret < 0)
	{
		avformat_free_context(ifmt_ctx);
		av_free((void*)inbuffer);
		avio_context_free(&avio_in);
		avformat_free_context(ofmt_ctx);
		return avError(ret);
	}

	printf("avformat_write_header Success!\n");
	//push the stream packet by packet
	//int64_t pts  [pts * (num/den) = the second at which the frame is presented]
	//int64_t dts  decode timestamp [P frame: delta from the previous frame; I frame: a complete keyframe; B frame: delta from both the previous and the next frame]. B frames give a higher compression ratio.
	AVPacket pkt;
	//current wall-clock time in microseconds
	long long start_time = av_gettime();
	long long frame_index = 0;
	while (1)
	{
		//input and output streams
		AVStream *in_stream, *out_stream;
		//read one packet of encoded data
		ret = av_read_frame(ifmt_ctx, &pkt);
		if (ret < 0) break;

		//PTS (Presentation Time Stamp): when to display the frame
		//DTS (Decoding Time Stamp): when to decode it
		//a raw H.264 stream carries no presentation timestamps, so generate them here
		if (pkt.pts == AV_NOPTS_VALUE)
		{
			//AVRational time_base: the time base that converts PTS/DTS into real time
			AVRational time_base1 = ifmt_ctx->streams[videoindex]->time_base;
			//duration between two frames
			int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(ifmt_ctx->streams[videoindex]->r_frame_rate);
			//derive pts/dts/duration from the frame index
			pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
			pkt.dts = pkt.pts;
			pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
		}

		if (pkt.stream_index == videoindex)
		{
			AVRational time_base = ifmt_ctx->streams[videoindex]->time_base;
			AVRational time_base_q = { 1,AV_TIME_BASE };
			//the packet's presentation time on the AV_TIME_BASE scale
			int64_t pts_time = av_rescale_q(pkt.dts, time_base, time_base_q);
			//how long we have actually been streaming
			int64_t now_time = av_gettime() - start_time;

			AVRational avr = ifmt_ctx->streams[videoindex]->time_base;
			printf("avr.num:%d, avr.den:%d, pkt.dts:%ld, pkt.pts:%ld, pts_time:%ld\n",
					avr.num,    avr.den,    pkt.dts,     pkt.pts,     pts_time);
			if (pts_time > now_time)
			{
				//sleep so that the stream's timestamps stay in step with wall-clock time
				printf("pts_time:%ld, now_time:%ld\n", pts_time, now_time);
				av_usleep((unsigned int)(pts_time - now_time));
			}
		}

		in_stream = ifmt_ctx->streams[pkt.stream_index];
		out_stream = ofmt_ctx->streams[pkt.stream_index];

		//after the delay, rescale the timestamps into the output stream's time base
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.duration = (int)av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		//byte position in the stream; -1 means unknown, let the muxer work it out
		pkt.pos = -1;
		if (pkt.stream_index == videoindex) {
			printf("Send %8lld video frames to output URL\n", frame_index);
			frame_index++;
		}

		ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
		if (ret < 0)
		{
			printf("發送數據包出錯\n");
			break;
		}
		av_free_packet(&pkt);
	}

	stop_capturing(&usbcamra);
	free_camra_resource(&usbcamra);
	avformat_free_context(ifmt_ctx);
	av_free((void*)inbuffer);
	avio_context_free(&avio_in);
	avformat_free_context(ofmt_ctx);
	return 0;
}

int avError(int errNum) {
    char buf[1024];
    av_strerror(errNum, buf, sizeof(buf));
    printf("failed! error %d: %s\n", errNum, buf);
    return -1;
}

How to run: ./ffmpeg_usb_rtmp /dev/video0 1280 720 30 1500000
Some USB cameras accept commands that set the H264 bitrate, but that is not implemented here: the commands differ from vendor to vendor, and V4L2 does not appear to offer one unified command that sets the encoder bitrate on every camera.
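For completeness, V4L2 does define a generic control, V4L2_CID_MPEG_VIDEO_BITRATE, and a sketch of trying it follows. Whether a given UVC camera honours it varies; many only react to vendor-specific UVC extension commands, which is exactly the situation described above. The helper name try_set_bitrate is mine, not part of the code above.

//Hedged sketch: try the standard bitrate control (declared via linux/videodev2.h,
//already included above). On cameras that do not implement it the ioctl fails
//with EINVAL, and vendor extension commands are the only remaining route.
static int try_set_bitrate(int fd, int bitrate_bps)
{
	struct v4l2_control ctrl;
	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE;
	ctrl.value = bitrate_bps;	//e.g. 1500000, the currently unused 5th argument
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) == -1)
	{
		printf("bitrate control not supported: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}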
The code above is only a proof of concept for reading H264 frames from a USB camera and pushing them over RTMP. It reads the data with poll() directly inside read_buffer, and with the frame rate set to 30 it drops frames and playback shows artifacts. The cause is the single thread: while an RTMP packet is being sent, no data can be read from the camera, so frames are lost.
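The obvious fix, which I have not implemented yet, is to split capture and sending into two threads joined by a small frame queue. A minimal sketch of the idea follows; it reuses read_frame and the globals from the listing above, the queue depth is an assumption, and a real version still needs shutdown handling:

//Sketch of the two-thread fix (not implemented above): the capture thread
//drains the camera as fast as poll() allows, so no frame is lost while the
//sending thread is blocked inside av_interleaved_write_frame().
#include <pthread.h>

#define QUEUE_DEPTH 8	//assumed depth; tune as needed

struct frame_slot { unsigned char data[AV_IO_BUF_SIZE]; unsigned int len; };
static struct frame_slot queue[QUEUE_DEPTH];
static int q_head = 0, q_tail = 0;	//head: consumer, tail: producer
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_cond = PTHREAD_COND_INITIALIZER;

static void *capture_thread(void *arg)	//producer: poll() + read_frame()
{
	static unsigned char tmp[AV_IO_BUF_SIZE];
	struct timeval tvl;
	for (;;)
	{
		if (poll(usbcamra_poll_fd, usbcamra_poll_fd_num, -1) <= 0)
			continue;
		int len = read_frame(&usbcamra, tmp, 0, &tvl);	//always drain the driver
		if (len <= 0)
			continue;
		pthread_mutex_lock(&q_lock);
		int next = (q_tail + 1) % QUEUE_DEPTH;
		if (next != q_head)	//queue full -> drop this frame instead of blocking
		{
			memcpy(queue[q_tail].data, tmp, len);
			queue[q_tail].len = (unsigned int)len;
			q_tail = next;
			pthread_cond_signal(&q_cond);
		}
		pthread_mutex_unlock(&q_lock);
	}
	return NULL;
}

//read_buffer() then becomes the consumer: wait on q_cond until q_head != q_tail,
//memcpy queue[q_head].data into pbuf, advance q_head, and return the length.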
Audio capture and muxing before pushing has not been added either; that is the next step. Once everything is verified, the code still has to be ported to the i.MX6 platform.

Makefile

TARGET		 = ffmpeg_usb_rtmp
LIB_PATH 	 = /usr/local/lib/

FFMPEG_LIBS = -lavutil -lavdevice -lavformat -lavcodec -lswresample -lavfilter -lswscale 
SDL_LIBS	= -lSDL2
EXTRA_LIBS  = -lz -lm -lpthread -lstdc++ -lrt -lpcre
ALL_LIBS	= $(EXTRA_LIBS) $(SDL_LIBS) $(FFMPEG_LIBS)  
 
COMPILE_OPTS = -v -g -Wall -Wno-deprecated-declarations 

C_COMPILER   = gcc
C_FLAGS 	 = $(CFLAGS) $(COMPILE_OPTS) 
LD_FLAGS 	 = -L$(LIB_PATH) $(LDFLAGS)

SRC = ffmpeg_usb_rtmp.c

ALL:
	$(C_COMPILER) $(C_FLAGS) $(LD_FLAGS) $(SRC) -o $(TARGET) $(ALL_LIBS) 
clean:
	rm -rf $(TARGET) *.o *.mp4 *.wav *.h264 *.avi *.flv 

There is still a long road ahead in learning FFmpeg!
