FFmpeg to send an RTSP encoded stream in C++
I am trying to figure out a way to take an encoded H.264 image I have created in FFmpeg and send it out via RTSP, also using FFmpeg. Is there any sample code or a tutorial out there that shows how to do this? I tried searching the web, but I couldn't find anything.
Any help is appreciated. Thanks in advance.
I made a few changes in docs/examples/muxing.c. Here is code that transmits audio and video streams using MPEG-4 over RTSP:
#include <stdio.h>
#include <math.h>
#include <vector>
#include <windows.h>
#include <iostream>
#include <fstream>
#include <time.h>

#define _XOPEN_SOURCE 600 /* for usleep */

extern "C" {
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
//#include <libavutil/timestamp.h> /* av_ts2str() macros do not compile as C++ */
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}

using namespace std;

#pragma comment(lib, "dev\\avcodec.lib")
#pragma comment(lib, "dev\\avformat.lib")
#pragma comment(lib, "dev\\avfilter.lib")
#pragma comment(lib, "dev\\avdevice.lib")
#pragma comment(lib, "dev\\avutil.lib")
#pragma comment(lib, "dev\\postproc.lib")
#pragma comment(lib, "dev\\swresample.lib")
#pragma comment(lib, "dev\\swscale.lib")

static int audio_is_eof, video_is_eof;

#define STREAM_DURATION   50.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
    (void)time_base;
    /* the av_ts2str()/av_ts2timestr() logging from the original example is
     * disabled: libavutil/timestamp.h relies on C compound literals, which
     * do not compile as C++ */
}

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base,
                       AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    pkt->pts = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base,
                                AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt->dts = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base,
                                AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* write the compressed frame to the media file */
    log_packet(fmt_ctx, pkt);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}

/* add an output stream */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "could not allocate stream\n");
        exit(1);
    }
    st->id = oc->nb_streams - 1;
    c = st->codec;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        c->channels    = 2;
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;
        if (codec_id == AV_CODEC_ID_H264)
            cout << "codec id " << (AVCodecID)codec_id << endl;

        c->bit_rate = 400000;
        /* resolution must be a multiple of two */
        c->width  = 352;
        c->height = 288;
        /* timebase: this is the fundamental unit of time (in seconds) in
         * terms of which frame timestamps are represented. for fixed-fps
         * content, the timebase should be 1/framerate and the timestamp
         * increments should be identical to 1. */
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt  = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* needed to avoid using macroblocks in which some coeffs
             * overflow. this does not happen with normal video, it just
             * happens here as the motion of the chroma plane does not
             * match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* some formats want stream headers to be separate */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;
AVFrame *audio_frame;
static uint8_t **src_samples_data;
static int src_samples_linesize;
static int src_nb_samples;

static int max_dst_nb_samples;
uint8_t **dst_samples_data;
int dst_samples_linesize;
int dst_samples_size;
int samples_count;

struct SwrContext *swr_ctx = NULL;

static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    AVCodecContext *c;
    int ret;

    c = st->codec;

    /* allocate and init a re-usable frame */
    audio_frame = av_frame_alloc();
    if (!audio_frame) {
        fprintf(stderr, "could not allocate audio frame\n");
        exit(1);
    }

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "could not open audio codec\n");
        exit(1);
    }

    /* init signal generator */
    t     = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
        10000 : c->frame_size;

    ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize,
                                             c->channels, src_nb_samples,
                                             AV_SAMPLE_FMT_S16, 0);
    if (ret < 0) {
        fprintf(stderr, "could not allocate source samples\n");
        exit(1);
    }

    /* compute the number of converted samples: buffering is avoided by
     * ensuring that the output buffer will contain at least all the
     * converted input samples */
    max_dst_nb_samples = src_nb_samples;

    /* create the resampler context */
    if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
        swr_ctx = swr_alloc();
        if (!swr_ctx) {
            fprintf(stderr, "could not allocate resampler context\n");
            exit(1);
        }

        /* set options */
        av_opt_set_int       (swr_ctx, "in_channel_count",  c->channels,       0);
        av_opt_set_int       (swr_ctx, "in_sample_rate",    c->sample_rate,    0);
        av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int       (swr_ctx, "out_channel_count", c->channels,       0);
        av_opt_set_int       (swr_ctx, "out_sample_rate",   c->sample_rate,    0);
        av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt",    c->sample_fmt,     0);

        /* initialize the resampling context */
        if ((ret = swr_init(swr_ctx)) < 0) {
            fprintf(stderr, "failed to initialize the resampling context\n");
            exit(1);
        }

        ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize,
                                                 c->channels, max_dst_nb_samples,
                                                 c->sample_fmt, 0);
        if (ret < 0) {
            fprintf(stderr, "could not allocate destination samples\n");
            exit(1);
        }
    } else {
        dst_samples_data = src_samples_data;
    }
    dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
                                                  c->sample_fmt, 0);
}

/* prepare a 16-bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        t     += tincr;
        tincr += tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, AVStream *st, int flush)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0
    int got_packet, ret, dst_nb_samples;

    av_init_packet(&pkt);
    c = st->codec;

    if (!flush) {
        get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);

        /* convert samples from native format to destination codec format,
         * using the resampler */
        if (swr_ctx) {
            /* compute destination number of samples */
            dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
                                            c->sample_rate, c->sample_rate, AV_ROUND_UP);
            if (dst_nb_samples > max_dst_nb_samples) {
                av_free(dst_samples_data[0]);
                ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize,
                                       c->channels, dst_nb_samples, c->sample_fmt, 0);
                if (ret < 0)
                    exit(1);
                max_dst_nb_samples = dst_nb_samples;
                dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
                                                              c->sample_fmt, 0);
            }

            /* convert to destination format */
            ret = swr_convert(swr_ctx, dst_samples_data, dst_nb_samples,
                              (const uint8_t **)src_samples_data, src_nb_samples);
            if (ret < 0) {
                fprintf(stderr, "error while converting\n");
                exit(1);
            }
        } else {
            dst_nb_samples = src_nb_samples;
        }

        AVRational tb = { 1, c->sample_rate };
        audio_frame->nb_samples = dst_nb_samples;
        audio_frame->pts = av_rescale_q(samples_count, tb, c->time_base);
        avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt,
                                 dst_samples_data[0], dst_samples_size, 0);
        samples_count += dst_nb_samples;
    }

    ret = avcodec_encode_audio2(c, &pkt, flush ? NULL : audio_frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "error encoding audio frame\n");
        exit(1);
    }

    if (!got_packet) {
        if (flush)
            audio_is_eof = 1;
        return;
    }

    ret = write_frame(oc, &c->time_base, st, &pkt);
    if (ret < 0) {
        fprintf(stderr, "error while writing audio frame\n");
        exit(1);
    }
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    if (dst_samples_data != src_samples_data) {
        av_free(dst_samples_data[0]);
        av_free(dst_samples_data);
    }
    av_free(src_samples_data[0]);
    av_free(src_samples_data);
    av_frame_free(&audio_frame);
}

/**************************************************************/
/* video output */

static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;

static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "could not open video codec\n");
        exit(1);
    }

    /* allocate and init a re-usable frame */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    /* allocate the encoded raw picture */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        fprintf(stderr, "could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. it is then converted to the required
     * output format. */
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            fprintf(stderr, "could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
}

/* prepare a dummy image */
static void fill_yuv_image(AVPicture *pict, int frame_index, int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (!flush) {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr, "could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx, (const uint8_t * const *)src_picture.data,
                      src_picture.linesize, 0, c->height,
                      dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE && !flush) {
        /* raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = dst_picture.data[0];
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;
        av_init_packet(&pkt);

        /* encode the image */
        frame->pts = frame_count;
        ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "error encoding video frame\n");
            exit(1);
        }
        /* if size is zero, it means the image was buffered */
        if (got_packet) {
            ret = write_frame(oc, &c->time_base, st, &pkt);
        } else {
            if (flush)
                video_is_eof = 1;
            ret = 0;
        }
    }

    if (ret < 0) {
        fprintf(stderr, "error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(src_picture.data[0]);
    av_free(dst_picture.data[0]);
    av_frame_free(&frame);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename = "rtsp://127.0.0.1:8554/live.sdp";
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_time, video_time;
    int flush, ret;

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();
    avformat_network_init();

    /* allocate the output media context; force the RTSP muxer */
    avformat_alloc_output_context2(&oc, NULL, "rtsp", filename);
    if (!oc) {
        printf("could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;
    if (!fmt)
        cout << "error creating outformat\n";

    /* add the audio and video streams using the default format codecs
     * and initialize the codecs */
    /*if (av_opt_set(fmt, "rtsp_transport", "tcp", 0) < 0)
        cout << "opt not set\n";*/
    video_st = NULL;
    audio_st = NULL;

    cout << "codec = " << avcodec_get_name(fmt->video_codec) << endl;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE)
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);

    /* now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    char errorBuff[80];

    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "could not open outfile '%s': %s", filename,
                    av_make_error_string(errorBuff, 80, ret));
            return 1;
        }
    }

    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "error occurred when writing header: %s",
                av_make_error_string(errorBuff, 80, ret));
        return 1;
    }

    flush = 0;
    while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
        /* compute current audio and video time */
        audio_time = (audio_st && !audio_is_eof) ?
            audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
        video_time = (video_st && !video_is_eof) ?
            video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;

        if (!flush &&
            (!audio_st || audio_time >= STREAM_DURATION) &&
            (!video_st || video_time >= STREAM_DURATION)) {
            flush = 1;
        }

        /* write interleaved audio and video frames */
        if (audio_st && !audio_is_eof && audio_time <= video_time) {
            write_audio_frame(oc, audio_st, flush);
        } else if (video_st && !video_is_eof && video_time < audio_time) {
            write_video_frame(oc, video_st, flush);
        }
    }

    /* write the trailer, if any. the trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* close the output file */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
The above code acts as a client, so before executing it you need to start a server to receive the stream. I used the following command for that purpose:
ffplay -rtsp_flags listen -i rtsp://127.0.0.1:8554/live.sdp
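For a quick sanity check of the same push setup without the C++ code, the stock ffmpeg binary can act as the RTSP client in the same way (input.mp4 here is just an assumed test file):

ffmpeg -re -i input.mp4 -f rtsp rtsp://127.0.0.1:8554/live.sdp

In either case, start ffplay first: in listen mode it waits for the client's RTSP ANNOUNCE/RECORD before playback begins.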