Starting from the output side, let's walk through the structure of the OBS output. This deserves a careful pass, because the next step is to write the video into an Unreal RenderTarget object and render it as a material.

Audio also needs to be fed into the Unreal Engine separately. While walking through the code, I will strip out the non-essential logic and annotations and keep only the main path.

// Bilin's annotated version
struct obs_output {
    // OBS context
    struct obs_context_data context;
    // output info (the function table registered by the output plugin)
    struct obs_output_info info;
    /* indicates ownership of the info.id buffer */
    bool owns_info_id;

    int64_t video_offset;
    int64_t audio_offsets[MAX_OUTPUT_AUDIO_ENCODERS];
    int64_t highest_audio_ts;
    int64_t highest_video_ts;
    pthread_t end_data_capture_thread;

    int total_frames;

    // video output pointer
    video_t *video;
    // audio output pointer
    audio_t *audio;
    // video encoder
    obs_encoder_t *video_encoder;
    // audio encoders; an array because multiple audio tracks are supported
    obs_encoder_t *audio_encoders[MAX_OUTPUT_AUDIO_ENCODERS];

    struct circlebuf audio_buffer[MAX_AUDIO_MIXES][MAX_AV_PLANES];

    uint64_t audio_start_ts;
    uint64_t video_start_ts;
    size_t audio_size;
    size_t planes;
    size_t sample_rate;
    size_t total_audio_frames;

    uint32_t scaled_width;
    uint32_t scaled_height;

    struct video_scale_info video_conversion;
    struct audio_convert_info audio_conversion;

    struct circlebuf caption_data;

    float audio_data[MAX_AUDIO_CHANNELS][AUDIO_OUTPUT_FRAMES];
};
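Since the end goal is to push these frames into an Unreal RenderTarget, it is worth noting that the video and audio pointers in this struct can be queried through libobs' public API to learn the frame format up front. Below is a minimal sketch, not taken from the OBS source; it only assumes you already hold a valid obs_output_t somewhere in your integration layer. It reads the resolution, pixel format and sample rate you would need on the Unreal side.

#include <obs.h>
#include <media-io/video-io.h>
#include <media-io/audio-io.h>

/* Minimal sketch: given an already-created output, query the raw
 * video/audio parameters that would drive an Unreal RenderTarget upload. */
static void inspect_output_media(obs_output_t *output)
{
    video_t *video = obs_output_video(output);
    audio_t *audio = obs_output_audio(output);

    if (video) {
        const struct video_output_info *vi = video_output_get_info(video);
        /* width/height give the RenderTarget size,
         * format tells you how to interpret each plane */
        blog(LOG_INFO, "video: %ux%u, format=%d, fps=%u/%u",
             vi->width, vi->height, (int)vi->format,
             vi->fps_num, vi->fps_den);
    }

    if (audio) {
        const struct audio_output_info *ai = audio_output_get_info(audio);
        blog(LOG_INFO, "audio: %u Hz, %d channels",
             ai->samples_per_sec, (int)get_audio_channels(ai->speakers));
    }
}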

With the output struct above in mind, let's actually debug it. Clicking "Start Recording" enters the obs_output_start function. This function is just a thin wrapper, so we'll go over it quickly.

// Bilin's trimmed and annotated version
bool obs_output_start(obs_output_t *output)
{
    bool encoded;

    encoded = (output->info.flags & OBS_OUTPUT_ENCODED) != 0;
    if (encoded && output->delay_sec) {
        // delayed start
        return obs_output_delay_start(output);
    } else {
        // actual start
        if (obs_output_actual_start(output)) {
            // emit the "starting" signal
            do_output_signal(output, "starting");
            return true;
        }
        return false;
    }
}
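For context on how this entry point gets hit: a frontend creates the output by its registered id, optionally listens on its signals (the "starting" signal above goes through the same signal handler), and then calls obs_output_start(). The sketch below is not OBS frontend code; the names start_recording_sketch / on_output_start and the "my recording" label are made up, and encoder setup is omitted, so it is only meant to show the call sequence.

#include <obs.h>

/* Hypothetical callback: fires once the output has fully started */
static void on_output_start(void *data, calldata_t *cd)
{
    UNUSED_PARAMETER(data);
    obs_output_t *out = calldata_ptr(cd, "output");
    blog(LOG_INFO, "output '%s' started", obs_output_get_name(out));
}

static obs_output_t *start_recording_sketch(const char *path)
{
    obs_data_t *settings = obs_data_create();
    obs_data_set_string(settings, "path", path);

    /* "ffmpeg_muxer" is the output id used for local recordings */
    obs_output_t *output = obs_output_create("ffmpeg_muxer", "my recording",
                                             settings, NULL);
    obs_data_release(settings);
    if (!output)
        return NULL;

    /* outputs also emit "starting", "stopping" and "stop" */
    signal_handler_connect(obs_output_get_signal_handler(output), "start",
                           on_output_start, NULL);

    /* a real caller would attach encoders first with
     * obs_output_set_video_encoder() / obs_output_set_audio_encoder() */
    if (!obs_output_start(output)) {
        obs_output_release(output);
        return NULL;
    }
    return output;
}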

The next part is the key piece.

// Bilin's trimmed and annotated version
bool obs_output_actual_start(obs_output_t *output)
{
    bool success = false;

    // Step 1: call output->info.start, passing output->context.data
    if (output->context.data)
        success = output->info.start(output->context.data);

    if (success && output->video) {
        output->starting_frame_count =
            video_output_get_total_frames(output->video);
        output->starting_drawn_count = obs->video.total_frames;
        output->starting_lagged_count = obs->video.lagged_frames;
    }

    if (os_atomic_load_long(&output->delay_restart_refs))
        os_atomic_dec_long(&output->delay_restart_refs);

    output->caption_timestamp = 0;

    circlebuf_free(&output->caption_data);
    circlebuf_init(&output->caption_data);

    return success;
}

The first step is calling output->info.start, with output->context.data as the argument.

output->info.start is a function pointer, defined as follows:

// Bilin's trimmed and annotated version
// obs_output_info: mostly a table of function pointers
struct obs_output_info {
    /* required */
    const char *id;

    uint32_t flags;

    const char *(*get_name)(void *type_data);

    // create
    void *(*create)(obs_data_t *settings, obs_output_t *output);
    // destroy
    void (*destroy)(void *data);

    // start
    bool (*start)(void *data);
    // stop
    void (*stop)(void *data, uint64_t ts);

    void (*raw_video)(void *data, struct video_data *frame);
    void (*raw_audio)(void *data, struct audio_data *frames);

    void (*encoded_packet)(void *data, struct encoder_packet *packet);

    // get default settings
    void (*get_defaults)(obs_data_t *settings);
    // get properties
    obs_properties_t *(*get_properties)(void *data);

    uint64_t (*get_total_bytes)(void *data);

    int (*get_dropped_frames)(void *data);

    void *type_data;

    /* only used with encoded outputs, separated with semicolon */
    const char *encoded_video_codecs;
    const char *encoded_audio_codecs;

    /* raw audio callback for multi track outputs */
    void (*raw_audio2)(void *data, size_t idx, struct audio_data *frames);
};
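To see how the start pointer gets filled in practice, here is a hedged sketch, not from the OBS tree, of how an output plugin fills an obs_output_info and registers it. All the my_output_* names are made up for illustration. The key relationship: whatever create() returns is stored as context.data, and that same pointer is later handed back to start(), stop() and the raw callbacks.

#include <obs-module.h>

/* hypothetical private state for the output */
struct my_output {
    obs_output_t *output;
};

static const char *my_output_get_name(void *type_data)
{
    UNUSED_PARAMETER(type_data);
    return "My Output";
}

static void *my_output_create(obs_data_t *settings, obs_output_t *output)
{
    UNUSED_PARAMETER(settings);
    struct my_output *ctx = bzalloc(sizeof(*ctx));
    ctx->output = output;
    return ctx; /* this pointer becomes output->context.data */
}

static void my_output_destroy(void *data)
{
    bfree(data);
}

static bool my_output_start(void *data)
{
    struct my_output *ctx = data;
    /* raw (non-encoded) output: ask libobs to start delivering frames */
    return obs_output_begin_data_capture(ctx->output, 0);
}

static void my_output_stop(void *data, uint64_t ts)
{
    UNUSED_PARAMETER(ts);
    struct my_output *ctx = data;
    obs_output_end_data_capture(ctx->output);
}

static void my_output_raw_video(void *data, struct video_data *frame)
{
    /* raw frames arrive here; this is where an Unreal RenderTarget
     * upload could hook in */
    UNUSED_PARAMETER(data);
    UNUSED_PARAMETER(frame);
}

static struct obs_output_info my_output_info = {
    .id = "my_output",
    .flags = OBS_OUTPUT_VIDEO,
    .get_name = my_output_get_name,
    .create = my_output_create,
    .destroy = my_output_destroy,
    .start = my_output_start,
    .stop = my_output_stop,
    .raw_video = my_output_raw_video,
};

/* inside the module's obs_module_load(): */
/* obs_register_output(&my_output_info); */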

The context is defined as follows:

// context data structure
struct obs_context_data {
    char *name;
    const char *uuid;
    // this is the most important field
    void *data;
    obs_data_t *settings;
    signal_handler_t *signals;
    proc_handler_t *procs;
    enum obs_obj_type type;

    struct obs_weak_object *control;

    DARRAY(obs_hotkey_id) hotkeys;
    DARRAY(obs_hotkey_pair_id) hotkey_pairs;
    obs_data_t *hotkey_data;

    // linked list, used to chain multiple contexts together
    struct obs_context_data *next;
    struct obs_context_data **prev_next;

    bool private;
};

Here, data is the most important member of the context.

Since I am recording the screen through the FFmpeg muxer output, data here is a struct ffmpeg_muxer.

So in this case, output->info.start(output->context.data) expands into the following:

static bool ffmpeg_mux_start(void *data)
{
    struct ffmpeg_muxer *stream = data;

    // read the settings
    obs_data_t *settings = obs_output_get_settings(stream->output);
    // actually start the muxer
    bool success = ffmpeg_mux_start_internal(stream, settings);
    obs_data_release(settings);

    return success;
}
// Bilin's trimmed and annotated version
static inline bool ffmpeg_mux_start_internal(struct ffmpeg_muxer *stream,
                                             obs_data_t *settings)
{
    // read the save path
    // debug result: C:/Users/86180/Videos/2023-03-16 13-36-11.mp4
    const char *path = obs_data_get_string(settings, "path");

    // apply the path to the encoder settings
    update_encoder_settings(stream, path);

    // network (streaming) branch
    if (stream->is_network) {
        obs_service_t *service;
        service = obs_output_get_service(stream->output);
        if (!service)
            return false;
        path = obs_service_get_url(service);
        stream->split_file = false;
    } else {
        // local recording branch
        stream->max_time =
            obs_data_get_int(settings, "max_time_sec") * 1000000LL;
        stream->max_size = obs_data_get_int(settings, "max_size_mb") *
                           (1024 * 1024);
        stream->split_file = obs_data_get_bool(settings, "split_file");
        stream->allow_overwrite =
            obs_data_get_bool(settings, "allow_overwrite");
        stream->cur_size = 0;
        stream->sent_headers = false;
    }

    ts_offset_clear(stream);

    // try writing a test file to make sure the output path is writable
    if (!stream->is_network) {
        /* ensure output path is writable to avoid generic error
         * message.
         *
         * TODO: remove once ffmpeg-mux is refactored to pass
         * errors back */
        FILE *test_file = os_fopen(path, "wb");
        if (!test_file) {
            set_file_not_readable_error(stream, settings, path);
            return false;
        }

        fclose(test_file);
        os_unlink(path);
    }

    // this builds and launches a command line such as:
    // "D:/dev/obs/obs-studio/build/rundir/Debug/bin/64bit/obs-ffmpeg-mux.exe" "C:/Users/86180/Videos/2023-03-16 14-58-19.mp4" 1 1 h264 850 1280 720 1 1 1 1 1 0 30 1 0 aac "simple_aac" 192 48000 1024 2 "" ""
    // which shows that the recording is done by an external helper program
    start_pipe(stream, path);

    if (!stream->pipe) {
        obs_output_set_last_error(stream->output,
                                  obs_module_text("HelperProcessFailed"));
        warn("Failed to create process pipe");
        return false;
    }

    /* write headers and start capture */
    os_atomic_set_bool(&stream->active, true);
    os_atomic_set_bool(&stream->capturing, true);
    stream->total_bytes = 0;
    obs_output_begin_data_capture(stream->output, 0);

    info("Writing file '%s'...", stream->path.array);
    return true;
}

The core working logic of ffmpeg_mux is all here.
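As a side note, the keys read by ffmpeg_mux_start_internal() above ("path", "max_time_sec", "max_size_mb", "split_file", "allow_overwrite") are ordinary obs_data settings that the frontend fills in before starting the output. A hedged sketch of doing the same from your own code, with placeholder values and a made-up function name:

#include <obs.h>

static void configure_recording_sketch(obs_output_t *output, const char *path)
{
    obs_data_t *settings = obs_data_create();

    obs_data_set_string(settings, "path", path);
    obs_data_set_int(settings, "max_time_sec", 0);   /* placeholder value */
    obs_data_set_int(settings, "max_size_mb", 0);    /* placeholder value */
    obs_data_set_bool(settings, "split_file", false);
    obs_data_set_bool(settings, "allow_overwrite", false);

    /* merges these keys into the output's existing settings */
    obs_output_update(output, settings);
    obs_data_release(settings);
}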

As we can see, the actual recording is carried out by the external program obs-ffmpeg-mux.exe.
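The muxer talks to that helper through libobs' process-pipe utilities: it launches obs-ffmpeg-mux.exe with the command line built in start_pipe() and then writes encoded packets into the child's stdin. Below is a rough sketch of that pattern using the public os_process_pipe API; the function names launch_mux_helper_sketch / send_bytes_sketch are my own, and the command line string is whatever caller passes in, not the real one built by start_pipe().

#include <obs.h>
#include <util/pipe.h>

static os_process_pipe_t *launch_mux_helper_sketch(const char *cmd_line)
{
    /* "w" = we write to the child process's stdin */
    os_process_pipe_t *pipe = os_process_pipe_create(cmd_line, "w");
    if (!pipe)
        blog(LOG_WARNING, "Failed to start ffmpeg-mux helper");
    return pipe;
}

static bool send_bytes_sketch(os_process_pipe_t *pipe, const uint8_t *buf,
                              size_t size)
{
    /* returns the number of bytes actually written */
    return os_process_pipe_write(pipe, buf, size) == size;
}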
