// Copyright (c) 2008-2019 OpenShot Studios, LLC, Fabrice Bellard
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include <thread>    // for std::this_thread::sleep_for
#include <chrono>    // for std::chrono::milliseconds
#include <unistd.h>

#include "FFmpegUtilities.h"

#include "FFmpegReader.h"
#include "Exceptions.h"
#include "Timeline.h"
#include "ZmqLogger.h"

#define ENABLE_VAAPI 0

#if USE_HW_ACCEL
#define MAX_SUPPORTED_WIDTH 1950
#define MAX_SUPPORTED_HEIGHT 1100

#if ENABLE_VAAPI
#include "libavutil/hwcontext_vaapi.h"

typedef struct VAAPIDecodeContext {
    VAProfile va_profile;
    VAEntrypoint va_entrypoint;
    VAConfigID va_config;
    VAContextID va_context;

#if FF_API_STRUCT_VAAPI_CONTEXT
    // FF_DISABLE_DEPRECATION_WARNINGS
    int have_old_context;
    struct vaapi_context *old_context;
    AVBufferRef *device_ref;
    // FF_ENABLE_DEPRECATION_WARNINGS
#endif

    AVHWDeviceContext *device;
    AVVAAPIDeviceContext *hwctx;

    AVHWFramesContext *frames;
    AVVAAPIFramesContext *hwfc;

    enum AVPixelFormat surface_format;
    int surface_count;
} VAAPIDecodeContext;
#endif // ENABLE_VAAPI
#endif // USE_HW_ACCEL
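// Note: the VAAPIDecodeContext typedef above mirrors the layout of FFmpeg's
// private vaapi_decode.h, so that Open() can read va_config out of the codec's
// priv_data (see the ENABLE_VAAPI block there). It must be kept in sync with
// the FFmpeg version being linked against.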


using namespace openshot;

int hw_de_on = 0;
#if USE_HW_ACCEL
    AVPixelFormat hw_de_av_pix_fmt_global = AV_PIX_FMT_NONE;
    AVHWDeviceType hw_de_av_device_type_global = AV_HWDEVICE_TYPE_NONE;
#endif

FFmpegReader::FFmpegReader(const std::string &path, bool inspect_reader)
        : last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0), NO_PTS_OFFSET(-99999),
          path(path), is_video_seek(true), check_interlace(false), check_fps(false), enable_seek(true), is_open(false),
          seek_audio_frame_found(0), seek_video_frame_found(0), is_duration_known(false), largest_frame_processed(0),
          current_video_frame(0), packet(NULL), max_concurrent_frames(OPEN_MP_NUM_PROCESSORS), audio_pts(0),
          video_pts(0), pFormatCtx(NULL), videoStream(-1), audioStream(-1), pCodecCtx(NULL), aCodecCtx(NULL),
          pStream(NULL), aStream(NULL), pFrame(NULL), previous_packet_location{-1,0},
          hold_packet(false) {

    // Initialize FFMpeg, and register all formats and codecs
    AV_REGISTER_ALL
    AVCODEC_REGISTER_ALL

    // Init timestamp offsets
    pts_offset_seconds = NO_PTS_OFFSET;
    video_pts_seconds = NO_PTS_OFFSET;
    audio_pts_seconds = NO_PTS_OFFSET;

    // Init cache
    working_cache.SetMaxBytesFromInfo(max_concurrent_frames * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
    final_cache.SetMaxBytesFromInfo(max_concurrent_frames * 2 * info.fps.ToDouble(), info.width, info.height, info.sample_rate, info.channels);

    // Open and Close the reader, to populate its attributes (such as height, width, etc...)
    if (inspect_reader) {
        Open();
        Close();
    }
}
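// Illustrative usage (the file name is a placeholder):
//   openshot::FFmpegReader r("video.mp4");
//   r.Open();
//   std::shared_ptr<openshot::Frame> f = r.GetFrame(1);
//   r.Close();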

FFmpegReader::~FFmpegReader() {
    if (is_open)
        // Auto close reader if not already done
        Close();
}

// This struct holds the associated video frame and starting sample # for an audio packet.
bool AudioLocation::is_near(AudioLocation location, int samples_per_frame, int64_t amount) {
    // Is frame even close to this one?
    if (abs(location.frame - frame) >= 2)
        // This is too far away to be considered
        return false;

    // Note that samples_per_frame can vary slightly frame to frame when the
    // audio sampling rate is not an integer multiple of the video fps.
    int64_t diff = samples_per_frame * (location.frame - frame) + location.sample_start - sample_start;
    if (abs(diff) <= amount)
        // close
        return true;

    // not close
    return false;
}
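// Worked example (hypothetical numbers): at 30 fps and 44100 Hz there are
// 1470 samples per frame. Comparing {frame: 10, sample_start: 0} against
// {frame: 9, sample_start: 1400} gives diff = 1470 * 1 + 0 - 1400 = 70,
// so the two locations are "near" for any amount >= 70.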

#if USE_HW_ACCEL

// Get hardware pix format
static enum AVPixelFormat get_hw_dec_format(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        switch (*p) {
#if defined(__linux__)
            // Linux pix formats
            case AV_PIX_FMT_VAAPI:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_VAAPI;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VAAPI;
                return *p;
            case AV_PIX_FMT_VDPAU:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_VDPAU;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VDPAU;
                return *p;
#endif
#if defined(_WIN32)
            // Windows pix formats
            case AV_PIX_FMT_DXVA2_VLD:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_DXVA2_VLD;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_DXVA2;
                return *p;
            case AV_PIX_FMT_D3D11:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_D3D11;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_D3D11VA;
                return *p;
#endif
#if defined(__APPLE__)
            // Apple pix formats
            case AV_PIX_FMT_VIDEOTOOLBOX:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_VIDEOTOOLBOX;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
                return *p;
#endif
            // Cross-platform pix formats
            case AV_PIX_FMT_CUDA:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_CUDA;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_CUDA;
                return *p;
            case AV_PIX_FMT_QSV:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_QSV;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_QSV;
                return *p;
            default:
                // This is only here to silence unused-enum warnings
                break;
        }
    }
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::get_hw_dec_format (Unable to decode this file using hardware decode)");
    return AV_PIX_FMT_NONE;
}

int FFmpegReader::IsHardwareDecodeSupported(int codecid)
{
    int ret;
    switch (codecid) {
        case AV_CODEC_ID_H264:
        case AV_CODEC_ID_MPEG2VIDEO:
        case AV_CODEC_ID_VC1:
        case AV_CODEC_ID_WMV1:
        case AV_CODEC_ID_WMV2:
        case AV_CODEC_ID_WMV3:
            ret = 1;
            break;
        default:
            ret = 0;
            break;
    }
    return ret;
}
#endif // USE_HW_ACCEL
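// Note: IsHardwareDecodeSupported() is a conservative whitelist; any codec it
// does not list is decoded in software even when hardware decoding is enabled.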

void FFmpegReader::Open() {
    // Open reader if not already open
    if (!is_open) {
        // Prevent async calls to the following code
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        // Initialize format context
        pFormatCtx = NULL;
        {
            hw_de_on = (openshot::Settings::Instance()->HARDWARE_DECODER == 0 ? 0 : 1);
            ZmqLogger::Instance()->AppendDebugMethod("Decode hardware acceleration settings", "hw_de_on", hw_de_on, "HARDWARE_DECODER", openshot::Settings::Instance()->HARDWARE_DECODER);
        }

        // Open video file
        if (avformat_open_input(&pFormatCtx, path.c_str(), NULL, NULL) != 0)
            throw InvalidFile("File could not be opened.", path);

        // Retrieve stream information
        if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
            throw NoStreamsFound("No streams found in file.", path);

        videoStream = -1;
        audioStream = -1;

        // Init end-of-file detection variables
        packet_status.reset(true);

        // Loop through each stream, and identify the video and audio stream index
        for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
            // Is this a video stream?
            if (AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
                videoStream = i;
                packet_status.video_eof = false;
                packet_status.packets_eof = false;
                packet_status.end_of_file = false;
            }
            // Is this an audio stream?
            if (AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
                audioStream = i;
                packet_status.audio_eof = false;
                packet_status.packets_eof = false;
                packet_status.end_of_file = false;
            }
        }
        if (videoStream == -1 && audioStream == -1)
            throw NoStreamsFound("No video or audio streams found in this file.", path);

        // Is there a video stream?
        if (videoStream != -1) {
            // Set the stream index
            info.video_stream_index = videoStream;

            // Set the codec and codec context pointers
            pStream = pFormatCtx->streams[videoStream];

            // Find the codec ID from stream
            const AVCodecID codecId = AV_FIND_DECODER_CODEC_ID(pStream);

            // Get codec and codec context from stream
            const AVCodec *pCodec = avcodec_find_decoder(codecId);
            AVDictionary *opts = NULL;
            int retry_decode_open = 2;
            // If hw accel is selected but the hardware cannot handle it, repeat with software decoding
            do {
                pCodecCtx = AV_GET_CODEC_CONTEXT(pStream, pCodec);
#if USE_HW_ACCEL
                if (hw_de_on && (retry_decode_open == 2)) {
                    // Up to here no decision is made if hardware or software decode
                    hw_de_supported = IsHardwareDecodeSupported(pCodecCtx->codec_id);
                }
#endif
                retry_decode_open = 0;

                // Set number of threads equal to number of processors (not to exceed 16)
                pCodecCtx->thread_count = std::min(FF_NUM_PROCESSORS, 16);

                if (pCodec == NULL) {
                    throw InvalidCodec("A valid video codec could not be found for this file.", path);
                }

                // Init options
                av_dict_set(&opts, "strict", "experimental", 0);
#if USE_HW_ACCEL
                if (hw_de_on && hw_de_supported) {
                    // Open Hardware Acceleration
                    int i_decoder_hw = 0;
                    char adapter[256];
                    char *adapter_ptr = NULL;
                    int adapter_num;
                    adapter_num = openshot::Settings::Instance()->HW_DE_DEVICE_SET;
                    fprintf(stderr, "Hardware decoding device number: %d\n", adapter_num);
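
                    // The HARDWARE_DECODER setting selects the backend; the values used
                    // by the switch statements below are: 1 = VAAPI, 2 = CUDA, 3 = DXVA2,
                    // 4 = D3D11VA, 5 = VideoToolbox, 6 = VDPAU, 7 = QSV.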

                    // Set hardware pix format (callback)
                    pCodecCtx->get_format = get_hw_dec_format;

                    if (adapter_num < 3 && adapter_num >= 0) {
#if defined(__linux__)
                        snprintf(adapter, sizeof(adapter), "/dev/dri/renderD%d", adapter_num + 128);
                        adapter_ptr = adapter;
                        i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
                        switch (i_decoder_hw) {
                            case 1:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
                                break;
                            case 2:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
                                break;
                            case 6:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_VDPAU;
                                break;
                            case 7:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
                                break;
                            default:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
                                break;
                        }

#elif defined(_WIN32)
                        adapter_ptr = NULL;
                        i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
                        switch (i_decoder_hw) {
                            case 2:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
                                break;
                            case 3:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
                                break;
                            case 4:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_D3D11VA;
                                break;
                            case 7:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
                                break;
                            default:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
                                break;
                        }
#elif defined(__APPLE__)
                        adapter_ptr = NULL;
                        i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
                        switch (i_decoder_hw) {
                            case 5:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
                                break;
                            case 7:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
                                break;
                            default:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
                                break;
                        }
#endif

                    } else {
                        adapter_ptr = NULL;  // Just to be sure
                    }

                    // Check if it is there and writable
#if defined(__linux__)
                    if( adapter_ptr != NULL && access( adapter_ptr, W_OK ) == 0 ) {
#elif defined(_WIN32)
                    if( adapter_ptr != NULL ) {
#elif defined(__APPLE__)
                    if( adapter_ptr != NULL ) {
#endif
                        ZmqLogger::Instance()->AppendDebugMethod("Decode Device present using device");
                    }
                    else {
                        adapter_ptr = NULL;  // use default
                        ZmqLogger::Instance()->AppendDebugMethod("Decode Device not present using default");
                    }

                    hw_device_ctx = NULL;
                    // Here the first hardware initialisations are made
                    if (av_hwdevice_ctx_create(&hw_device_ctx, hw_de_av_device_type, adapter_ptr, NULL, 0) >= 0) {
                        if (!(pCodecCtx->hw_device_ctx = av_buffer_ref(hw_device_ctx))) {
                            throw InvalidCodec("Hardware device reference create failed.", path);
                        }

                        /*
                        av_buffer_unref(&ist->hw_frames_ctx);
                        ist->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
                        if (!ist->hw_frames_ctx) {
                            av_log(avctx, AV_LOG_ERROR, "Error creating a CUDA frames context\n");
                            return AVERROR(ENOMEM);
                        }

                        frames_ctx = (AVHWFramesContext*)ist->hw_frames_ctx->data;

                        frames_ctx->format = AV_PIX_FMT_CUDA;
                        frames_ctx->sw_format = avctx->sw_pix_fmt;
                        frames_ctx->width = avctx->width;
                        frames_ctx->height = avctx->height;

                        av_log(avctx, AV_LOG_DEBUG, "Initializing CUDA frames context: sw_format = %s, width = %d, height = %d\n",
                               av_get_pix_fmt_name(frames_ctx->sw_format), frames_ctx->width, frames_ctx->height);


                        ret = av_hwframe_ctx_init(pCodecCtx->hw_device_ctx);
                        ret = av_hwframe_ctx_init(ist->hw_frames_ctx);
                        if (ret < 0) {
                            av_log(avctx, AV_LOG_ERROR, "Error initializing a CUDA frame pool\n");
                            return ret;
                        }
                        */
                    }
                    else {
                        throw InvalidCodec("Hardware device create failed.", path);
                    }
                }
#endif // USE_HW_ACCEL

                // Disable per-frame threading for album arts
                // Using FF_THREAD_FRAME adds one frame decoding delay per thread,
                // but there's only one frame in this case.
                if (HasAlbumArt())
                {
                    pCodecCtx->thread_type &= ~FF_THREAD_FRAME;
                }

                // Open video codec
                int avcodec_return = avcodec_open2(pCodecCtx, pCodec, &opts);
                if (avcodec_return < 0) {
                    std::stringstream avcodec_error_msg;
                    avcodec_error_msg << "A video codec was found, but could not be opened. Error: " << av_err2string(avcodec_return);
                    throw InvalidCodec(avcodec_error_msg.str(), path);
                }

#if USE_HW_ACCEL
                if (hw_de_on && hw_de_supported) {
                    AVHWFramesConstraints *constraints = NULL;
                    void *hwconfig = NULL;
                    hwconfig = av_hwdevice_hwconfig_alloc(hw_device_ctx);

// TODO: needs va_config!
#if ENABLE_VAAPI
                    ((AVVAAPIHWConfig *)hwconfig)->config_id = ((VAAPIDecodeContext *)(pCodecCtx->priv_data))->va_config;
                    constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwconfig);
#endif // ENABLE_VAAPI
                    if (constraints) {
                        if (pCodecCtx->coded_width < constraints->min_width ||
                            pCodecCtx->coded_height < constraints->min_height ||
                            pCodecCtx->coded_width > constraints->max_width ||
                            pCodecCtx->coded_height > constraints->max_height) {
                            ZmqLogger::Instance()->AppendDebugMethod("DIMENSIONS ARE TOO LARGE for hardware acceleration\n");
                            hw_de_supported = 0;
                            retry_decode_open = 1;
                            AV_FREE_CONTEXT(pCodecCtx);
                            if (hw_device_ctx) {
                                av_buffer_unref(&hw_device_ctx);
                                hw_device_ctx = NULL;
                            }
                        }
                        else {
                            // All is just peachy
                            ZmqLogger::Instance()->AppendDebugMethod("\nDecode hardware acceleration is used\n", "Min width :", constraints->min_width, "Min Height :", constraints->min_height, "MaxWidth :", constraints->max_width, "MaxHeight :", constraints->max_height, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
                            retry_decode_open = 0;
                        }
                        av_hwframe_constraints_free(&constraints);
                        if (hwconfig) {
                            av_freep(&hwconfig);
                        }
                    }
                    else {
                        int max_h, max_w;
                        //max_h = ((getenv( "LIMIT_HEIGHT_MAX" )==NULL) ? MAX_SUPPORTED_HEIGHT : atoi(getenv( "LIMIT_HEIGHT_MAX" )));
                        max_h = openshot::Settings::Instance()->DE_LIMIT_HEIGHT_MAX;
                        //max_w = ((getenv( "LIMIT_WIDTH_MAX" )==NULL) ? MAX_SUPPORTED_WIDTH : atoi(getenv( "LIMIT_WIDTH_MAX" )));
                        max_w = openshot::Settings::Instance()->DE_LIMIT_WIDTH_MAX;
                        ZmqLogger::Instance()->AppendDebugMethod("Constraints could not be found using default limit\n");
                        //cerr << "Constraints could not be found using default limit\n";
                        if (pCodecCtx->coded_width < 0 ||
                            pCodecCtx->coded_height < 0 ||
                            pCodecCtx->coded_width > max_w ||
                            pCodecCtx->coded_height > max_h ) {
                            ZmqLogger::Instance()->AppendDebugMethod("DIMENSIONS ARE TOO LARGE for hardware acceleration\n", "Max Width :", max_w, "Max Height :", max_h, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
                            hw_de_supported = 0;
                            retry_decode_open = 1;
                            AV_FREE_CONTEXT(pCodecCtx);
                            if (hw_device_ctx) {
                                av_buffer_unref(&hw_device_ctx);
                                hw_device_ctx = NULL;
                            }
                        }
                        else {
                            ZmqLogger::Instance()->AppendDebugMethod("\nDecode hardware acceleration is used\n", "Max Width :", max_w, "Max Height :", max_h, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
                            retry_decode_open = 0;
                        }
                    }
                } // if hw_de_on && hw_de_supported
                else {
                    ZmqLogger::Instance()->AppendDebugMethod("\nDecode in software is used\n");
                }
#else
                retry_decode_open = 0;
#endif // USE_HW_ACCEL
            } while (retry_decode_open); // retry_decode_open
            // Free options
            av_dict_free(&opts);

            // Update the File Info struct with video details (if a video stream is found)
            UpdateVideoInfo();
        }

        // Is there an audio stream?
        if (audioStream != -1) {
            // Set the stream index
            info.audio_stream_index = audioStream;

            // Get a pointer to the codec context for the audio stream
            aStream = pFormatCtx->streams[audioStream];

            // Find the codec ID from stream
            AVCodecID codecId = AV_FIND_DECODER_CODEC_ID(aStream);

            // Get codec and codec context from stream
            const AVCodec *aCodec = avcodec_find_decoder(codecId);
            aCodecCtx = AV_GET_CODEC_CONTEXT(aStream, aCodec);

            // Set number of threads equal to number of processors (not to exceed 16)
            aCodecCtx->thread_count = std::min(FF_NUM_PROCESSORS, 16);

            if (aCodec == NULL) {
                throw InvalidCodec("A valid audio codec could not be found for this file.", path);
            }

            // Init options
            AVDictionary *opts = NULL;
            av_dict_set(&opts, "strict", "experimental", 0);

            // Open audio codec
            if (avcodec_open2(aCodecCtx, aCodec, &opts) < 0)
                throw InvalidCodec("An audio codec was found, but could not be opened.", path);

            // Free options
            av_dict_free(&opts);

            // Update the File Info struct with audio details (if an audio stream is found)
            UpdateAudioInfo();
        }

        // Add format metadata (if any)
        AVDictionaryEntry *tag = NULL;
        while ((tag = av_dict_get(pFormatCtx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            QString str_key = tag->key;
            QString str_value = tag->value;
            info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
        }

        // Init previous audio location
        previous_packet_location.frame = -1;
        previous_packet_location.sample_start = 0;

        // Adjust cache size based on size of frame and audio
        working_cache.SetMaxBytesFromInfo(max_concurrent_frames * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
        final_cache.SetMaxBytesFromInfo(max_concurrent_frames * 2 * info.fps.ToDouble(), info.width, info.height, info.sample_rate, info.channels);

        // Scan PTS for any offsets (i.e. non-zero starting streams). At least 1 stream must start at zero timestamp.
        // This method allows us to shift timestamps to ensure at least 1 stream is starting at zero.
        UpdatePTSOffset();
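
        // For example (hypothetical container): if both streams start at
        // PTS 0.5s, pts_offset_seconds becomes -0.5s so that the earliest
        // stream is shifted to start at zero.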

        // Override an invalid framerate
        if (info.fps.ToFloat() > 240.0f || (info.fps.num <= 0 || info.fps.den <= 0) || info.video_length <= 0) {
            // Calculate FPS, duration, video bit rate, and video length manually
            // by scanning through all the video stream packets
            CheckFPS();
        }

        // Mark as "open"
        is_open = true;

        // Seek back to beginning of file (if not already seeking)
        if (!is_seeking) {
            Seek(1);
        }
    }
}

void FFmpegReader::Close() {
    // Close all objects, if reader is 'open'
    if (is_open) {
        // Prevent async calls to the following code
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        // Mark as "closed"
        is_open = false;

        // Keep track of most recent packet
        AVPacket *recent_packet = packet;

        // Drain any packets from the decoder
        packet = NULL;
        int attempts = 0;
        int max_attempts = 128;
        while (packet_status.packets_decoded() < packet_status.packets_read() && attempts < max_attempts) {
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::Close (Drain decoder loop)",
                    "packets_read", packet_status.packets_read(),
                    "packets_decoded", packet_status.packets_decoded(),
                    "attempts", attempts);
            if (packet_status.video_decoded < packet_status.video_read) {
                ProcessVideoPacket(info.video_length);
            }
            if (packet_status.audio_decoded < packet_status.audio_read) {
                ProcessAudioPacket(info.video_length);
            }
            attempts++;
        }

        // Remove packet
        if (recent_packet) {
            RemoveAVPacket(recent_packet);
        }

        // Close the video codec
        if (info.has_video) {
            if (avcodec_is_open(pCodecCtx)) {
                avcodec_flush_buffers(pCodecCtx);
            }
            AV_FREE_CONTEXT(pCodecCtx);
#if USE_HW_ACCEL
            if (hw_de_on) {
                if (hw_device_ctx) {
                    av_buffer_unref(&hw_device_ctx);
                    hw_device_ctx = NULL;
                }
            }
#endif // USE_HW_ACCEL
        }

        // Close the audio codec
        if (info.has_audio) {
            if (avcodec_is_open(aCodecCtx)) {
                avcodec_flush_buffers(aCodecCtx);
            }
            AV_FREE_CONTEXT(aCodecCtx);
        }

        // Clear final cache
        final_cache.Clear();
        working_cache.Clear();

        // Close the video file
        avformat_close_input(&pFormatCtx);
        av_freep(&pFormatCtx);

        // Reset some variables
        last_frame = 0;
        hold_packet = false;
        largest_frame_processed = 0;
        seek_audio_frame_found = 0;
        seek_video_frame_found = 0;
        current_video_frame = 0;
        last_video_frame.reset();
    }
}

bool FFmpegReader::HasAlbumArt() {
    // Check if the video stream we use is an attached picture
    // This won't return true if the file has a cover image as a secondary stream
    // like an MKV file with an attached image file
    return pFormatCtx && videoStream >= 0 && pFormatCtx->streams[videoStream]
           && (pFormatCtx->streams[videoStream]->disposition & AV_DISPOSITION_ATTACHED_PIC);
}

void FFmpegReader::UpdateAudioInfo() {
    // Set default audio channel layout (if needed)
    if (AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout == 0)
        AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout = av_get_default_channel_layout(AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels);

    if (info.sample_rate > 0) {
        // Skip init - if info struct already populated
        return;
    }

    // Set values of FileInfo struct
    info.has_audio = true;
    info.file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
    info.acodec = aCodecCtx->codec->name;
    info.channels = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels;
    info.channel_layout = (ChannelLayout) AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout;
    info.sample_rate = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->sample_rate;
    info.audio_bit_rate = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->bit_rate;
    if (info.audio_bit_rate <= 0) {
        // Get bitrate from format
        info.audio_bit_rate = pFormatCtx->bit_rate;
    }

    // Set audio timebase
    info.audio_timebase.num = aStream->time_base.num;
    info.audio_timebase.den = aStream->time_base.den;

    // Use the audio stream's duration (if valid and greater than the current duration)
    if (aStream->duration > 0 && aStream->duration > info.duration) {
        // Get duration from audio stream
        info.duration = aStream->duration * info.audio_timebase.ToDouble();
    } else if (pFormatCtx->duration > 0 && info.duration <= 0.0f) {
        // Use the format's duration
        info.duration = float(pFormatCtx->duration) / AV_TIME_BASE;
    }

    // Calculate duration from filesize and bitrate (if any)
    if (info.duration <= 0.0f && info.video_bit_rate > 0 && info.file_size > 0) {
        // Estimate from bitrate, total bytes, and framerate
        info.duration = float(info.file_size) / info.video_bit_rate;
    }

    // Check for an invalid video length
    if (info.has_video && info.video_length <= 0) {
        // Calculate the video length from the audio duration
        info.video_length = info.duration * info.fps.ToDouble();
    }

    // Set video timebase (if no video stream was found)
    if (!info.has_video) {
        // Set a few important default video settings (so audio can be divided into frames)
        info.fps.num = 24;
        info.fps.den = 1;
        info.video_timebase.num = 1;
        info.video_timebase.den = 24;
        info.video_length = info.duration * info.fps.ToDouble();
        info.width = 720;
        info.height = 480;

        // Use timeline to set correct width & height (if any)
        Clip *parent = static_cast<Clip *>(ParentClip());
        if (parent) {
            if (parent->ParentTimeline()) {
                // Set max width/height based on parent clip's timeline (if attached to a timeline)
                info.width = parent->ParentTimeline()->preview_width;
                info.height = parent->ParentTimeline()->preview_height;
            }
        }
    }

    // Fix invalid video lengths for certain types of files (MP3 for example)
    if (info.has_video && ((info.duration * info.fps.ToDouble()) - info.video_length > 60)) {
        info.video_length = info.duration * info.fps.ToDouble();
    }

    // Add audio metadata (if any found)
    AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get(aStream->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        QString str_key = tag->key;
        QString str_value = tag->value;
        info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
    }
}

void FFmpegReader::UpdateVideoInfo() {
    if (info.vcodec.length() > 0) {
        // Skip init - if info struct already populated
        return;
    }

    // Set values of FileInfo struct
    info.has_video = true;
    info.file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
    info.height = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->height;
    info.width = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->width;
    info.vcodec = pCodecCtx->codec->name;
    info.video_bit_rate = (pFormatCtx->bit_rate / 8);

    // Frame rate from the container and codec
    AVRational framerate = av_guess_frame_rate(pFormatCtx, pStream, NULL);
    if (!check_fps) {
        info.fps.num = framerate.num;
        info.fps.den = framerate.den;
    }

    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdateVideoInfo", "info.fps.num", info.fps.num, "info.fps.den", info.fps.den);

    // TODO: remove excessive debug info in the next releases
    // The debug info below is just for comparison and troubleshooting on the user's side during the transition period
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdateVideoInfo (pStream->avg_frame_rate)", "num", pStream->avg_frame_rate.num, "den", pStream->avg_frame_rate.den);

    if (pStream->sample_aspect_ratio.num != 0) {
        info.pixel_ratio.num = pStream->sample_aspect_ratio.num;
        info.pixel_ratio.den = pStream->sample_aspect_ratio.den;
    } else if (AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->sample_aspect_ratio.num != 0) {
        info.pixel_ratio.num = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->sample_aspect_ratio.num;
        info.pixel_ratio.den = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->sample_aspect_ratio.den;
    } else {
        info.pixel_ratio.num = 1;
        info.pixel_ratio.den = 1;
    }
    info.pixel_format = AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx);

    // Calculate the DAR (display aspect ratio)
    Fraction size(info.width * info.pixel_ratio.num, info.height * info.pixel_ratio.den);

    // Reduce size fraction
    size.Reduce();

    // Set the ratio based on the reduced fraction
    info.display_ratio.num = size.num;
    info.display_ratio.den = size.den;
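
    // Worked example: a 720x480 stream with an 8:9 pixel ratio gives
    // size = (720 * 8) : (480 * 9) = 5760:4320, which reduces to a 4:3
    // display aspect ratio.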

    // Get scan type and order from codec context/params
    if (!check_interlace) {
        check_interlace = true;
        AVFieldOrder field_order = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->field_order;
        switch (field_order) {
            case AV_FIELD_PROGRESSIVE:
                info.interlaced_frame = false;
                break;
            case AV_FIELD_TT:
            case AV_FIELD_TB:
                info.interlaced_frame = true;
                info.top_field_first = true;
                break;
            case AV_FIELD_BT:
            case AV_FIELD_BB:
                info.interlaced_frame = true;
                info.top_field_first = false;
                break;
            case AV_FIELD_UNKNOWN:
                // Check again later?
                check_interlace = false;
                break;
        }
        // check_interlace will prevent these checks being repeated,
        // unless it was cleared because we got an AV_FIELD_UNKNOWN response.
    }

    // Set the video timebase
    info.video_timebase.num = pStream->time_base.num;
    info.video_timebase.den = pStream->time_base.den;

    // Set the duration in seconds, and video length (# of frames)
    info.duration = pStream->duration * info.video_timebase.ToDouble();

    // Check for valid duration (if found)
    if (info.duration <= 0.0f && pFormatCtx->duration >= 0) {
        // Use the format's duration
        info.duration = float(pFormatCtx->duration) / AV_TIME_BASE;
    }

    // Calculate duration from filesize and bitrate (if any)
    if (info.duration <= 0.0f && info.video_bit_rate > 0 && info.file_size > 0) {
        // Estimate from bitrate, total bytes, and framerate
        info.duration = float(info.file_size) / info.video_bit_rate;
    }

    // Certain "image" formats do not have a valid duration
    if (info.duration <= 0.0f && pStream->duration == AV_NOPTS_VALUE && pFormatCtx->duration == AV_NOPTS_VALUE) {
        // Force an "image" duration
        info.duration = 60 * 60 * 1;  // 1 hour duration
        info.video_length = 1;
        info.has_single_image = true;
    }

    // Get the # of video frames (if found in stream)
    // Only set this 1 time (this method can be called multiple times)
    if (pStream->nb_frames > 0 && info.video_length <= 0) {
        info.video_length = pStream->nb_frames;
    }

    // No duration found in stream of file
    if (info.duration <= 0.0f) {
        // No duration is found in the video stream
        info.duration = -1;
        info.video_length = -1;
        is_duration_known = false;
    } else {
        // Yes, a duration was found
        is_duration_known = true;

        // Calculate number of frames (if not already found in metadata)
        // Only set this 1 time (this method can be called multiple times)
        if (info.video_length <= 0) {
            info.video_length = round(info.duration * info.fps.ToDouble());
        }
    }

    // Add video metadata (if any)
    AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get(pStream->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        QString str_key = tag->key;
        QString str_value = tag->value;
        info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
    }
}

bool FFmpegReader::GetIsDurationKnown() {
    return this->is_duration_known;
}

std::shared_ptr<Frame> FFmpegReader::GetFrame(int64_t requested_frame) {
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The FFmpegReader is closed. Call Open() before calling this method.", path);

    // Adjust for a requested frame that is too small or too large
    if (requested_frame < 1)
        requested_frame = 1;
    if (requested_frame > info.video_length && is_duration_known)
        requested_frame = info.video_length;
    if (info.has_video && info.video_length == 0)
        // Invalid duration of video file
        throw InvalidFile("Could not detect the duration of the video or audio stream.", path);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetFrame", "requested_frame", requested_frame, "last_frame", last_frame);

    // Check the cache for this frame
    std::shared_ptr<Frame> frame = final_cache.GetFrame(requested_frame);
    if (frame) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetFrame", "returned cached frame", requested_frame);

        // Return the cached frame
        return frame;
    } else {

        // Prevent async calls to the remainder of this code
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        // Check the cache a 2nd time (due to the potential previous lock)
        frame = final_cache.GetFrame(requested_frame);
        if (frame) {
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetFrame", "returned cached frame on 2nd look", requested_frame);

        } else {
            // Frame is not in cache
            // Reset seek count
            seek_count = 0;

            // Are we within X frames of the requested frame?
            int64_t diff = requested_frame - last_frame;
            if (diff >= 1 && diff <= 20) {
                // Continue walking the stream
                frame = ReadStream(requested_frame);
            } else {
                // More than 20 frames away (or backwards), so we need to seek to the nearest key frame
                if (enable_seek) {
                    // Only seek if enabled
                    Seek(requested_frame);

                } else if (!enable_seek && diff < 0) {
                    // Start over, since we can't seek, and the requested frame is smaller than our position
                    // Since we are seeking to frame 1, this actually just closes/re-opens the reader
                    Seek(1);
                }

                // Then continue walking the stream
                frame = ReadStream(requested_frame);
            }
        }
        return frame;
    }
}

// Read the stream until we find the requested Frame
std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame) {
    // Allocate video frame
    bool check_seek = false;
    int packet_error = -1;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream", "requested_frame", requested_frame, "max_concurrent_frames", max_concurrent_frames);

    // Loop through the stream until the correct frame is found
    while (true) {
        // Check if working frames are 'finished'
        if (!is_seeking) {
            // Check for final frames
            CheckWorkingFrames(requested_frame);
        }

        // Check if requested 'final' frame is available (and break out of loop if found)
        bool is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
        if (is_cache_found) {
            break;
        }

        if (!hold_packet || !packet) {
            // Get the next packet
            packet_error = GetNextPacket();
            if (packet_error < 0 && !packet) {
                // No more packets to be found
                packet_status.packets_eof = true;
            }
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (GetNextPacket)", "requested_frame", requested_frame, "packets_read", packet_status.packets_read(), "packets_decoded", packet_status.packets_decoded(), "is_seeking", is_seeking);

        // Check the status of a seek (if any)
        if (is_seeking) {
            check_seek = CheckSeek(false);
        } else {
            check_seek = false;
        }

        if (check_seek) {
            // Packet may become NULL on Close inside Seek if CheckSeek returns false
            // Jump to the next iteration of this loop
            continue;
        }

        // Video packet
        if ((info.has_video && packet && packet->stream_index == videoStream) ||
            (info.has_video && packet_status.video_decoded < packet_status.video_read) ||
            (info.has_video && !packet && !packet_status.video_eof)) {
            // Process Video Packet
            ProcessVideoPacket(requested_frame);
        }
        // Audio packet
        if ((info.has_audio && packet && packet->stream_index == audioStream) ||
            (info.has_audio && !packet && packet_status.audio_decoded < packet_status.audio_read) ||
            (info.has_audio && !packet && !packet_status.audio_eof)) {
            // Process Audio Packet
            ProcessAudioPacket(requested_frame);
        }

        // Remove unused packets (sometimes we purposely ignore video or audio packets,
        // if the has_video or has_audio properties are manually overridden)
        if ((!info.has_video && packet && packet->stream_index == videoStream) ||
            (!info.has_audio && packet && packet->stream_index == audioStream)) {
            // Keep track of deleted packet counts
            if (packet->stream_index == videoStream) {
                packet_status.video_decoded++;
            } else if (packet->stream_index == audioStream) {
                packet_status.audio_decoded++;
            }

            // Remove the unused packet
            RemoveAVPacket(packet);
            packet = NULL;
        }

        // Determine end-of-stream (waiting until final decoder threads finish)
        // Force end-of-stream in some situations
        packet_status.end_of_file = packet_status.packets_eof && packet_status.video_eof && packet_status.audio_eof;
        if ((packet_status.packets_eof && packet_status.packets_read() == packet_status.packets_decoded()) || packet_status.end_of_file) {
            // Force EOF (end of file) variables to true, if decoder does not support EOF detection.
            // If we have no more packets, and all known packets have been decoded
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (force EOF)", "packets_read", packet_status.packets_read(), "packets_decoded", packet_status.packets_decoded(), "packets_eof", packet_status.packets_eof, "video_eof", packet_status.video_eof, "audio_eof", packet_status.audio_eof, "end_of_file", packet_status.end_of_file);
            if (!packet_status.video_eof) {
                packet_status.video_eof = true;
            }
            if (!packet_status.audio_eof) {
                packet_status.audio_eof = true;
            }
            packet_status.end_of_file = true;
            break;
        }
    } // end while

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (Completed)",
            "packets_read", packet_status.packets_read(),
            "packets_decoded", packet_status.packets_decoded(),
            "end_of_file", packet_status.end_of_file,
            "largest_frame_processed", largest_frame_processed,
            "Working Cache Count", working_cache.Count());

    // Have we reached end-of-stream (or the final frame)?
    if (!packet_status.end_of_file && requested_frame >= info.video_length) {
        // Force end-of-stream
        packet_status.end_of_file = true;
    }
    if (packet_status.end_of_file) {
        // Mark any other working frames as 'finished'
        CheckWorkingFrames(requested_frame);
    }

    // Return requested frame (if found)
    std::shared_ptr<Frame> frame = final_cache.GetFrame(requested_frame);
    if (frame)
        // Return prepared frame
        return frame;
    else {

        // Check if largest frame is still cached
        frame = final_cache.GetFrame(largest_frame_processed);
        int samples_in_frame = Frame::GetSamplesPerFrame(requested_frame, info.fps,
                info.sample_rate, info.channels);
        if (frame) {
            // Copy and return the largest processed frame (assuming it was the last in the video file)
            std::shared_ptr<Frame> f = CreateFrame(largest_frame_processed);

            // Use solid color (if no image data found)
            if (!frame->has_image_data) {
                // Use solid black frame if no image data available
                f->AddColor(info.width, info.height, "#000");
            }
            // Silence audio data (if any), since we are repeating the last frame
            frame->AddAudioSilence(samples_in_frame);

            return frame;
        } else {
            // The largest processed frame is no longer in cache, return a blank frame
            std::shared_ptr<Frame> f = CreateFrame(largest_frame_processed);
            f->AddColor(info.width, info.height, "#000");
            f->AddAudioSilence(samples_in_frame);
            return f;
        }
    }

}

// Get the next packet (if any)
int FFmpegReader::GetNextPacket() {
    int found_packet = 0;
    AVPacket *next_packet;
    next_packet = new AVPacket();
    found_packet = av_read_frame(pFormatCtx, next_packet);

    if (packet) {
        // Remove previous packet before getting next one
        RemoveAVPacket(packet);
        packet = NULL;
    }
    if (found_packet >= 0) {
        // Update current packet pointer
        packet = next_packet;

        // Keep track of packet stats
        if (packet->stream_index == videoStream) {
            packet_status.video_read++;
        } else if (packet->stream_index == audioStream) {
            packet_status.audio_read++;
        }
    } else {
        // No more packets found
        delete next_packet;
        packet = NULL;
    }
    // Return if packet was found (or error number)
    return found_packet;
}

// Get an AVFrame (if any)
bool FFmpegReader::GetAVFrame() {
    int frameFinished = 0;

    // Decode video frame
    AVFrame *next_frame = AV_ALLOCATE_FRAME();

#if IS_FFMPEG_3_2
    int send_packet_err = 0;
    int64_t send_packet_pts = 0;
    if ((packet && packet->stream_index == videoStream) || !packet) {
        send_packet_err = avcodec_send_packet(pCodecCtx, packet);

        if (packet && send_packet_err >= 0) {
            send_packet_pts = GetPacketPTS();
            hold_packet = false;
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (send packet succeeded)", "send_packet_err", send_packet_err, "send_packet_pts", send_packet_pts);
        }
    }

#if USE_HW_ACCEL
    // Get the format from the variables set in get_hw_dec_format
    hw_de_av_pix_fmt = hw_de_av_pix_fmt_global;
    hw_de_av_device_type = hw_de_av_device_type_global;
#endif // USE_HW_ACCEL
    if (send_packet_err < 0 && send_packet_err != AVERROR_EOF) {
        ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (send packet: Not sent [" + av_err2string(send_packet_err) + "])", "send_packet_err", send_packet_err, "send_packet_pts", send_packet_pts);
        if (send_packet_err == AVERROR(EAGAIN)) {
            hold_packet = true;
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (send packet: AVERROR(EAGAIN): user must read output with avcodec_receive_frame()", "send_packet_pts", send_packet_pts);
        }
        if (send_packet_err == AVERROR(EINVAL)) {
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (send packet: AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush", "send_packet_pts", send_packet_pts);
        }
        if (send_packet_err == AVERROR(ENOMEM)) {
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (send packet: AVERROR(ENOMEM): failed to add packet to internal queue, or legitimate decoding errors", "send_packet_pts", send_packet_pts);
        }
    }

    // Always try to receive a frame, if not at EOF.
    // Even if the above avcodec_send_packet failed to send,
    // we might still need to receive a frame.
    int receive_frame_err = 0;
    AVFrame *next_frame2;
#if USE_HW_ACCEL
    if (hw_de_on && hw_de_supported) {
        next_frame2 = AV_ALLOCATE_FRAME();
    }
    else
#endif // USE_HW_ACCEL
    {
        next_frame2 = next_frame;
    }
    pFrame = AV_ALLOCATE_FRAME();
    while (receive_frame_err >= 0) {
        receive_frame_err = avcodec_receive_frame(pCodecCtx, next_frame2);

        if (receive_frame_err != 0) {
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (receive frame: frame not ready yet from decoder [" + av_err2string(receive_frame_err) + "])", "receive_frame_err", receive_frame_err, "send_packet_pts", send_packet_pts);

            if (receive_frame_err == AVERROR_EOF) {
                ZmqLogger::Instance()->AppendDebugMethod(
                        "FFmpegReader::GetAVFrame (receive frame: AVERROR_EOF: EOF detected from decoder, flushing buffers)", "send_packet_pts", send_packet_pts);
                avcodec_flush_buffers(pCodecCtx);
                packet_status.video_eof = true;
            }
            if (receive_frame_err == AVERROR(EINVAL)) {
                ZmqLogger::Instance()->AppendDebugMethod(
                        "FFmpegReader::GetAVFrame (receive frame: AVERROR(EINVAL): invalid frame received, flushing buffers)", "send_packet_pts", send_packet_pts);
                avcodec_flush_buffers(pCodecCtx);
            }
            if (receive_frame_err == AVERROR(EAGAIN)) {
                ZmqLogger::Instance()->AppendDebugMethod(
                        "FFmpegReader::GetAVFrame (receive frame: AVERROR(EAGAIN): output is not available in this state - user must try to send new input)", "send_packet_pts", send_packet_pts);
            }
            if (receive_frame_err == AVERROR_INPUT_CHANGED) {
                ZmqLogger::Instance()->AppendDebugMethod(
                        "FFmpegReader::GetAVFrame (receive frame: AVERROR_INPUT_CHANGED: current decoded frame has changed parameters with respect to first decoded frame)", "send_packet_pts", send_packet_pts);
            }

            // Break out of decoding loop
            // Nothing ready for decoding yet
            break;
        }

#if USE_HW_ACCEL
        if (hw_de_on && hw_de_supported) {
            int err;
            if (next_frame2->format == hw_de_av_pix_fmt) {
                next_frame->format = AV_PIX_FMT_YUV420P;
                if ((err = av_hwframe_transfer_data(next_frame, next_frame2, 0)) < 0) {
                    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (Failed to transfer data to output frame)", "hw_de_on", hw_de_on);
                }
                if ((err = av_frame_copy_props(next_frame, next_frame2)) < 0) {
                    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (Failed to copy props to output frame)", "hw_de_on", hw_de_on);
                }
            }
        }
        else
#endif // USE_HW_ACCEL
        {   // No hardware acceleration used -> no copy from GPU memory needed
            next_frame = next_frame2;
        }

        // TODO also handle possible further frames
        // Use only the first frame like avcodec_decode_video2
        frameFinished = 1;
        packet_status.video_decoded++;

        av_image_alloc(pFrame->data, pFrame->linesize, info.width, info.height, (AVPixelFormat)(pStream->codecpar->format), 1);
        av_image_copy(pFrame->data, pFrame->linesize, (const uint8_t**)next_frame->data, next_frame->linesize,
                      (AVPixelFormat)(pStream->codecpar->format), info.width, info.height);

        // Get display PTS from video frame, often different than packet->pts.
        // Sending packets to the decoder (i.e. packet->pts) is async,
        // and retrieving packets from the decoder (frame->pts) is async. In most decoders
        // sending and retrieving are separated by multiple calls to this method.
        if (next_frame->pts != AV_NOPTS_VALUE) {
            // This is the current decoded frame (and should be the pts used) for
            // processing this data
            video_pts = next_frame->pts;
        } else if (next_frame->pkt_dts != AV_NOPTS_VALUE) {
            // Some videos only set this timestamp (fallback)
            video_pts = next_frame->pkt_dts;
        }

        ZmqLogger::Instance()->AppendDebugMethod(
                "FFmpegReader::GetAVFrame (Successful frame received)", "video_pts", video_pts, "send_packet_pts", send_packet_pts);

        // break out of loop after each successful image returned
        break;
    }
#if USE_HW_ACCEL
    if (hw_de_on && hw_de_supported) {
        AV_FREE_FRAME(&next_frame2);
    }
#endif // USE_HW_ACCEL
#else
    avcodec_decode_video2(pCodecCtx, next_frame, &frameFinished, packet);

    // always allocate pFrame (because we do that in the ffmpeg >= 3.2 as well); it will always be freed later
    pFrame = AV_ALLOCATE_FRAME();

    // is frame finished
    if (frameFinished) {
        // AVFrames are clobbered on each call to avcodec_decode_video, so we
        // must make a copy of the image data before this method is called again.
        avpicture_alloc((AVPicture *) pFrame, pCodecCtx->pix_fmt, info.width, info.height);
        av_picture_copy((AVPicture *) pFrame, (AVPicture *) next_frame, pCodecCtx->pix_fmt, info.width,
                        info.height);
    }
#endif // IS_FFMPEG_3_2

    // deallocate the frame
    AV_FREE_FRAME(&next_frame);

    // Did we get a video frame?
    return frameFinished;
}
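// Note: on success, pFrame holds a deep copy of the decoded image (allocated
// above with av_image_alloc) and is released later via RemoveAVFrame() once
// ProcessVideoPacket() has consumed it.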

// Check the current seek position and determine if we need to seek again
bool FFmpegReader::CheckSeek(bool is_video) {
    // Are we seeking for a specific frame?
    if (is_seeking) {
        // Determine if both an audio and video packet have been decoded since the seek happened.
        // If not, allow the ReadStream method to keep looping
        if ((is_video_seek && !seek_video_frame_found) || (!is_video_seek && !seek_audio_frame_found))
            return false;

        // Check for both streams
        if ((info.has_video && !seek_video_frame_found) || (info.has_audio && !seek_audio_frame_found))
            return false;

        // Determine max seeked frame
        int64_t max_seeked_frame = std::max(seek_audio_frame_found, seek_video_frame_found);

        // determine if we are "before" the requested frame
        if (max_seeked_frame >= seeking_frame) {
            // SEEKED TOO FAR
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckSeek (Too far, seek again)",
                    "is_video_seek", is_video_seek,
                    "max_seeked_frame", max_seeked_frame,
                    "seeking_frame", seeking_frame,
                    "seeking_pts", seeking_pts,
                    "seek_video_frame_found", seek_video_frame_found,
                    "seek_audio_frame_found", seek_audio_frame_found);
            // Seek again... to the nearest Keyframe
            Seek(seeking_frame - (10 * seek_count * seek_count));
        } else {
            // SEEK WORKED
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckSeek (Successful)",
                    "is_video_seek", is_video_seek,
                    "packet->pts", GetPacketPTS(),
                    "seeking_pts", seeking_pts,
                    "seeking_frame", seeking_frame,
                    "seek_video_frame_found", seek_video_frame_found,
                    "seek_audio_frame_found", seek_audio_frame_found);

            // Seek worked, and we are "before" the requested frame
            is_seeking = false;
            seeking_frame = 0;
            seeking_pts = -1;
        }
    }

    // return the pts to seek to (if any)
    return is_seeking;
}

// Process a video packet
void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
    // Get the AVFrame from the current packet
    // This sets the video_pts to the correct timestamp
    int frame_finished = GetAVFrame();

    // Check if the AVFrame is finished and set it
    if (!frame_finished) {
        // No AVFrame decoded yet, bail out
        if (pFrame) {
            RemoveAVFrame(pFrame);
        }
        return;
    }

    // Calculate current frame #
    int64_t current_frame = ConvertVideoPTStoFrame(video_pts);

    // Track 1st video packet after a successful seek
    if (!seek_video_frame_found && is_seeking)
        seek_video_frame_found = current_frame;

    // Create or get the existing frame object. Requested frame needs to be created
    // in working_cache at least once. Seek can clear the working_cache, so we must
    // add the requested frame back to the working_cache here. If it already exists,
    // it will be moved to the top of the working_cache.
    working_cache.Add(CreateFrame(requested_frame));

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessVideoPacket (Before)", "requested_frame", requested_frame, "current_frame", current_frame);

    // Init some things local (for OpenMP)
    PixelFormat pix_fmt = AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx);
    int height = info.height;
    int width = info.width;
    int64_t video_length = info.video_length;

    // Create variables for a RGB Frame (since most videos are not in RGB, we must convert it)
    AVFrame *pFrameRGB = nullptr;
    uint8_t *buffer = nullptr;

    // Allocate an AVFrame structure
    pFrameRGB = AV_ALLOCATE_FRAME();
    if (pFrameRGB == nullptr)
        throw OutOfMemory("Failed to allocate frame buffer", path);

    // Determine the max size of this source image (based on the timeline's size, the scaling mode,
    // and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
    // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
    // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in
    // the future.
    int max_width = info.width;
    int max_height = info.height;

    Clip *parent = static_cast<Clip *>(ParentClip());
    if (parent) {
        if (parent->ParentTimeline()) {
            // Set max width/height based on parent clip's timeline (if attached to a timeline)
            max_width = parent->ParentTimeline()->preview_width;
            max_height = parent->ParentTimeline()->preview_height;
        }
        if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) {
            // Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
            float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
            float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
            max_width = std::max(float(max_width), max_width * max_scale_x);
            max_height = std::max(float(max_height), max_height * max_scale_y);

        } else if (parent->scale == SCALE_CROP) {
            // Cropping scale mode (based on max timeline size * cropped size * scaling keyframes)
            float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
            float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
            QSize width_size(max_width * max_scale_x,
                             round(max_width / (float(info.width) / float(info.height))));
            QSize height_size(round(max_height / (float(info.height) / float(info.width))),
                              max_height * max_scale_y);
            // respect aspect ratio
            if (width_size.width() >= max_width && width_size.height() >= max_height) {
                max_width = std::max(max_width, width_size.width());
                max_height = std::max(max_height, width_size.height());
            } else {
                max_width = std::max(max_width, height_size.width());
                max_height = std::max(max_height, height_size.height());
            }

        } else {
            // Scale video to equivalent unscaled size
            // Since the preview window can change sizes, we want to always
            // scale against the ratio of original video size to timeline size
            float preview_ratio = 1.0;
            if (parent->ParentTimeline()) {
                Timeline *t = (Timeline *) parent->ParentTimeline();
                preview_ratio = t->preview_width / float(t->info.width);
            }
            float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
            float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
            max_width = info.width * max_scale_x * preview_ratio;
            max_height = info.height * max_scale_y * preview_ratio;
        }
    }

    // Determine if image needs to be scaled (for performance reasons)
    int original_height = height;
    if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) {
        // Override width and height (but maintain aspect ratio)
        float ratio = float(width) / float(height);
        int possible_width = round(max_height * ratio);
        int possible_height = round(max_width / ratio);

        if (possible_width <= max_width) {
            // use calculated width, and max_height
            width = possible_width;
            height = max_height;
        } else {
            // use max_width, and calculated height
            width = max_width;
            height = possible_height;
        }
    }
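
    // Worked example (illustrative): a 1920x1080 source with a 1280x720 limit
    // has ratio 16:9, so possible_width = round(720 * 16/9) = 1280, which fits
    // max_width, and the frame is decoded at 1280x720.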

    // Determine required buffer size and allocate buffer
    const int bytes_per_pixel = 4;
    int buffer_size = (width * height * bytes_per_pixel) + 128;
    buffer = new unsigned char[buffer_size]();

    // Copy picture data from one AVFrame (or AVPicture) to another one.
    AV_COPY_PICTURE_DATA(pFrameRGB, buffer, PIX_FMT_RGBA, width, height);

    int scale_mode = SWS_FAST_BILINEAR;
    if (openshot::Settings::Instance()->HIGH_QUALITY_SCALING) {
        scale_mode = SWS_BICUBIC;
    }
    SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width,
                                                 height, PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL);

    // Resize / Convert to RGB
    sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
              original_height, pFrameRGB->data, pFrameRGB->linesize);

    // Create or get the existing frame object
    std::shared_ptr<Frame> f = CreateFrame(current_frame);

    // Add Image data to frame
    if (!ffmpeg_has_alpha(AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx))) {
        // Add image with no alpha channel, Speed optimization
        f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888_Premultiplied, buffer);
    } else {
        // Add image with alpha channel (this will be converted to premultiplied when needed, but is slower)
        f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888, buffer);
    }

    // Update working cache
    working_cache.Add(f);

    // Keep track of the last video frame
    last_video_frame = f;

    // Free the RGB image
    AV_FREE_FRAME(&pFrameRGB);

    // Remove frame and packet
    RemoveAVFrame(pFrame);
    sws_freeContext(img_convert_ctx);

    // Get video PTS in seconds
    video_pts_seconds = (double(video_pts) * info.video_timebase.ToDouble()) + pts_offset_seconds;
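
    // For example (hypothetical stream): video_pts 3003 with a 1/30000 time
    // base and a 0.0 offset maps to 3003 / 30000 = 0.1001 seconds.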

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessVideoPacket (After)", "requested_frame", requested_frame, "current_frame", current_frame, "f->number", f->number, "video_pts_seconds", video_pts_seconds);
}
1523
1524// Process an audio packet
1525void FFmpegReader::ProcessAudioPacket(int64_t requested_frame) {
1526 AudioLocation location;
1527 // Calculate location of current audio packet
1528 if (packet && packet->pts != AV_NOPTS_VALUE) {
1529 // Determine related video frame and starting sample # from audio PTS
1530 location = GetAudioPTSLocation(packet->pts);
1531
1532 // Track 1st audio packet after a successful seek
1533 if (!seek_audio_frame_found && is_seeking)
1534 seek_audio_frame_found = location.frame;
1535 }
1536
1537 // Create or get the existing frame object. Requested frame needs to be created
1538 // in working_cache at least once. Seek can clear the working_cache, so we must
1539 // add the requested frame back to the working_cache here. If it already exists,
1540 // it will be moved to the top of the working_cache.
1541 working_cache.Add(CreateFrame(requested_frame));
1542
1543 // Debug output
1544 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Before)",
1545 "requested_frame", requested_frame,
1546 "target_frame", location.frame,
1547 "starting_sample", location.sample_start);
1548
1549 // Init an AVFrame to hold the decoded audio samples
1550 int frame_finished = 0;
1551 AVFrame *audio_frame = AV_ALLOCATE_FRAME();
1552 AV_RESET_FRAME(audio_frame);
1553
1554 int packet_samples = 0;
1555 int data_size = 0;
1556
1557#if IS_FFMPEG_3_2
1558 int send_packet_err = avcodec_send_packet(aCodecCtx, packet);
1559 if (send_packet_err < 0 && send_packet_err != AVERROR_EOF) {
1560 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Packet not sent)");
1561 }
1562 else {
1563 int receive_frame_err = avcodec_receive_frame(aCodecCtx, audio_frame);
1564 if (receive_frame_err >= 0) {
1565 frame_finished = 1;
1566 }
1567 if (receive_frame_err == AVERROR_EOF) {
1568 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (EOF detected from decoder)");
1569 packet_status.audio_eof = true;
1570 }
1571 if (receive_frame_err == AVERROR(EINVAL) || receive_frame_err == AVERROR_EOF) {
1572 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (invalid frame received or EOF from decoder)");
1573 avcodec_flush_buffers(aCodecCtx);
1574 }
1575 if (receive_frame_err != 0) {
1576 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (frame not ready yet from decoder)");
1577 }
1578 }
1579#else
1580 int used = avcodec_decode_audio4(aCodecCtx, audio_frame, &frame_finished, packet);
1581#endif
1582
1583 if (frame_finished) {
1584 packet_status.audio_decoded++;
1585
1586 // The decoded frame's PTS can differ from the current packet's PTS, so
1587 // use the timestamp from the AVFrame returned by the audio decoder. This
1588 // timestamp is used for the remainder of this function.
1589 audio_pts = audio_frame->pts;
1590
1591 // Determine related video frame and starting sample # from audio PTS
1592 location = GetAudioPTSLocation(audio_pts);
1593
1594 // determine how many samples were decoded
1595 int plane_size = -1;
1596 data_size = av_samples_get_buffer_size(&plane_size,
1597 AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels,
1598 audio_frame->nb_samples,
1599 (AVSampleFormat) (AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx)), 1);
1600
1601 // Calculate total number of samples
1602 packet_samples = audio_frame->nb_samples * AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels;
1603 } else {
1604 if (audio_frame) {
1605 // Free audio frame
1606 AV_FREE_FRAME(&audio_frame);
1607 }
1608 }
1609
1610 // Estimate where this packet ends (frame # and sample #), to prevent gaps before the next timestamp
1611 int pts_remaining_samples = packet_samples / info.channels; // Convert interleaved count to per-channel sample count
1612
1613 // Bail if no samples found
1614 if (pts_remaining_samples == 0) {
1615 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (No samples, bailing)",
1616 "packet_samples", packet_samples,
1617 "info.channels", info.channels,
1618 "pts_remaining_samples", pts_remaining_samples);
1619 return;
1620 }
1621
1622 while (pts_remaining_samples) {
1623 // Get Samples per frame (for this frame number)
1624 int samples_per_frame = Frame::GetSamplesPerFrame(previous_packet_location.frame, info.fps, info.sample_rate, info.channels);
1625
1626 // Calculate # of samples to add to this frame
1627 int samples = samples_per_frame - previous_packet_location.sample_start;
1628 if (samples > pts_remaining_samples)
1629 samples = pts_remaining_samples;
1630
1631 // Decrement remaining samples
1632 pts_remaining_samples -= samples;
1633
1634 if (pts_remaining_samples > 0) {
1635 // next frame
1636 previous_packet_location.frame++;
1637 previous_packet_location.sample_start = 0;
1638 } else {
1639 // Increment sample start
1640 previous_packet_location.sample_start += samples;
1641 }
1642 }
1643
1644 // Allocate audio buffer
1645 int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE];
1646
1647 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (ReSample)",
1648 "packet_samples", packet_samples,
1649 "info.channels", info.channels,
1650 "info.sample_rate", info.sample_rate,
1651 "aCodecCtx->sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx),
1652 "AV_SAMPLE_FMT_S16", AV_SAMPLE_FMT_S16);
1653
1654 // Create output frame
1655 AVFrame *audio_converted = AV_ALLOCATE_FRAME();
1656 AV_RESET_FRAME(audio_converted);
1657 audio_converted->nb_samples = audio_frame->nb_samples;
1658 av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
1659
1660 SWRCONTEXT *avr = NULL;
1661 int nb_samples = 0;
1662
1663 // setup resample context
1664 avr = SWR_ALLOC();
1665 av_opt_set_int(avr, "in_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
1666 av_opt_set_int(avr, "out_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
1667 av_opt_set_int(avr, "in_sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), 0);
1668 av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
1669 av_opt_set_int(avr, "in_sample_rate", info.sample_rate, 0);
1670 av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
1671 av_opt_set_int(avr, "in_channels", info.channels, 0);
1672 av_opt_set_int(avr, "out_channels", info.channels, 0);
1673 SWR_INIT(avr);
1674
1675 // Convert audio samples
1676 nb_samples = SWR_CONVERT(avr, // audio resample context
1677 audio_converted->data, // output data pointers
1678 audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
1679 audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
1680 audio_frame->data, // input data pointers
1681 audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
1682 audio_frame->nb_samples); // number of input samples to convert
1683
1684 // Copy audio samples over original samples
1685 memcpy(audio_buf,
1686 audio_converted->data[0],
1687 static_cast<size_t>(audio_converted->nb_samples)
1688 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)
1689 * info.channels);
1690
1691 // Deallocate resample buffer
1692 SWR_CLOSE(avr);
1693 SWR_FREE(&avr);
1694 avr = NULL;
1695
1696 // Free the converted samples buffer and AVFrame
1697 av_free(audio_converted->data[0]);
1698 AV_FREE_FRAME(&audio_converted);
1699
1700 int64_t starting_frame_number = -1;
1701 bool partial_frame = true;
1702 for (int channel_filter = 0; channel_filter < info.channels; channel_filter++) {
1703 // Array of floats (to hold samples for each channel)
1704 starting_frame_number = location.frame;
1705 int channel_buffer_size = packet_samples / info.channels;
1706 float *channel_buffer = new float[channel_buffer_size];
1707
1708 // Init buffer array
1709 for (int z = 0; z < channel_buffer_size; z++)
1710 channel_buffer[z] = 0.0f;
1711
1712 // Loop through all samples and add them to our Frame based on channel.
1713 // Toggle through each channel number, since channel data is stored like (left right left right)
1714 int channel = 0;
1715 int position = 0;
1716 for (int sample = 0; sample < packet_samples; sample++) {
1717 // Only add samples for current channel
1718 if (channel_filter == channel) {
1719 // Add sample (convert from (-32768 to 32768) to (-1.0 to 1.0))
1720 channel_buffer[position] = audio_buf[sample] * (1.0f / (1 << 15));
1721
1722 // Increment audio position
1723 position++;
1724 }
1725
1726 // increment channel (if needed)
1727 if ((channel + 1) < info.channels)
1728 // move to next channel
1729 channel++;
1730 else
1731 // reset channel
1732 channel = 0;
1733 }
1734
1735 // Loop through samples, and add them to the correct frames
1736 int start = location.sample_start;
1737 int remaining_samples = channel_buffer_size;
1738 float *iterate_channel_buffer = channel_buffer; // pointer to channel buffer
1739 while (remaining_samples > 0) {
1740 // Get Samples per frame (for this frame number)
1741 int samples_per_frame = Frame::GetSamplesPerFrame(starting_frame_number,
1742 info.fps, info.sample_rate, info.channels);
1743
1744 // Calculate # of samples to add to this frame
1745 int samples = samples_per_frame - start;
1746 if (samples > remaining_samples)
1747 samples = remaining_samples;
1748
1749 // Create or get the existing frame object
1750 std::shared_ptr<Frame> f = CreateFrame(starting_frame_number);
1751
1752 // Determine if this frame was "partially" filled in
1753 if (samples_per_frame == start + samples)
1754 partial_frame = false;
1755 else
1756 partial_frame = true;
1757
1758 // Add samples for current channel to the frame.
1759 f->AddAudio(true, channel_filter, start, iterate_channel_buffer,
1760 samples, 1.0f);
1761
1762 // Debug output
1763 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (f->AddAudio)",
1764 "frame", starting_frame_number,
1765 "start", start,
1766 "samples", samples,
1767 "channel", channel_filter,
1768 "partial_frame", partial_frame,
1769 "samples_per_frame", samples_per_frame);
1770
1771 // Add or update cache
1772 working_cache.Add(f);
1773
1774 // Decrement remaining samples
1775 remaining_samples -= samples;
1776
1777 // Increment buffer (to next set of samples)
1778 if (remaining_samples > 0)
1779 iterate_channel_buffer += samples;
1780
1781 // Increment frame number
1782 starting_frame_number++;
1783
1784 // Reset starting sample #
1785 start = 0;
1786 }
1787
1788 // clear channel buffer
1789 delete[] channel_buffer;
1790 channel_buffer = NULL;
1791 iterate_channel_buffer = NULL;
1792 }
1793
1794 // Clean up some arrays
1795 delete[] audio_buf;
1796 audio_buf = NULL;
1797
1798 // Free audio frame
1799 AV_FREE_FRAME(&audio_frame);
1800
1801 // Get audio PTS in seconds
1802 audio_pts_seconds = (double(audio_pts) * info.audio_timebase.ToDouble()) + pts_offset_seconds;
1803
1804 // Debug output
1805 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (After)",
1806 "requested_frame", requested_frame,
1807 "starting_frame", location.frame,
1808 "end_frame", starting_frame_number - 1,
1809 "audio_pts_seconds", audio_pts_seconds);
1810
1811}
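
// --------------------------------------------------------------------------
// Editor's example: a standalone sketch of the resample step above using
// libswresample directly (the SWR_* macros expand to these calls on recent
// FFmpeg builds). It converts a decoded frame to interleaved signed 16-bit
// samples at an unchanged rate and channel count. The helper name is
// hypothetical; the pre-FFmpeg-5.1 "in/out_channel_layout" integer options
// are assumed, matching the reader code above, and error handling is trimmed.
// --------------------------------------------------------------------------
extern "C" {
#include <libswresample/swresample.h>
#include <libavutil/opt.h>
}

static int example_resample_to_s16(const AVFrame *in, uint8_t **out_data,
                                   int *out_linesize, int channels,
                                   int sample_rate, int64_t channel_layout) {
	SwrContext *swr = swr_alloc();
	av_opt_set_int(swr, "in_channel_layout", channel_layout, 0);
	av_opt_set_int(swr, "out_channel_layout", channel_layout, 0);
	av_opt_set_int(swr, "in_sample_rate", sample_rate, 0);
	av_opt_set_int(swr, "out_sample_rate", sample_rate, 0);
	av_opt_set_int(swr, "in_sample_fmt", in->format, 0);
	av_opt_set_int(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
	if (swr_init(swr) < 0) {
		swr_free(&swr);
		return -1;
	}

	// Allocate one interleaved S16 plane; the caller frees it with
	// av_freep(&out_data[0]) once the samples have been copied out.
	av_samples_alloc(out_data, out_linesize, channels,
	                 in->nb_samples, AV_SAMPLE_FMT_S16, 0);
	int converted = swr_convert(swr, out_data, in->nb_samples,
	                            (const uint8_t **) in->data, in->nb_samples);
	swr_free(&swr);
	return converted; // samples written per channel, or < 0 on error
}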
1812
1813
1814// Seek to a specific frame. This is not always frame accurate; on many codecs it is more of an estimation.
1815void FFmpegReader::Seek(int64_t requested_frame) {
1816 // Adjust for a requested frame that is too small or too large
1817 if (requested_frame < 1)
1818 requested_frame = 1;
1819 if (requested_frame > info.video_length)
1820 requested_frame = info.video_length;
1821 if (requested_frame > largest_frame_processed && packet_status.end_of_file) {
1822 // Not possible to search past largest_frame once EOF is reached (no more packets)
1823 return;
1824 }
1825
1826 // Debug output
1827 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::Seek",
1828 "requested_frame", requested_frame,
1829 "seek_count", seek_count,
1830 "last_frame", last_frame);
1831
1832 // Clear working cache (since we are seeking to another location in the file)
1833 working_cache.Clear();
1834
1835 // Reset decoding state (PTS values, last frame, etc...)
1836 video_pts = 0;
1837 video_pts_seconds = NO_PTS_OFFSET;
1838 audio_pts = 0;
1839 audio_pts_seconds = NO_PTS_OFFSET;
1840 hold_packet = false;
1841 last_frame = 0;
1842 current_video_frame = 0;
1843 largest_frame_processed = 0;
1844 bool has_audio_override = info.has_audio;
1845 bool has_video_override = info.has_video;
1846
1847 // Init end-of-file detection variables
1848 packet_status.reset(false);
1849
1850 // Increment seek count
1851 seek_count++;
1852
1853 // If seeking near frame 1, we need to close and re-open the file (this is more reliable than seeking)
1854 int buffer_amount = std::max(max_concurrent_frames, 8);
1855 if (requested_frame - buffer_amount < 20) {
1856 // prevent Open() from seeking again
1857 is_seeking = true;
1858
1859 // Close and re-open file (basically seeking to frame 1)
1860 Close();
1861 Open();
1862
1863 // Update overrides (since closing and re-opening might update these)
1864 info.has_audio = has_audio_override;
1865 info.has_video = has_video_override;
1866
1867 // Not actually seeking, so clear these flags
1868 is_seeking = false;
1869 if (seek_count == 1) {
1870 // Don't redefine this on multiple seek attempts for a specific frame
1871 seeking_frame = 1;
1872 seeking_pts = ConvertFrameToVideoPTS(1);
1873 }
1874 seek_audio_frame_found = 0; // used to detect which frames to throw away after a seek
1875 seek_video_frame_found = 0; // used to detect which frames to throw away after a seek
1876
1877 } else {
1878 // Seek to nearest key-frame (aka, i-frame)
1879 bool seek_worked = false;
1880 int64_t seek_target = 0;
1881
1882 // Seek video stream (if any), except album arts
1883 if (!seek_worked && info.has_video && !HasAlbumArt()) {
1884 seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);
1885 if (av_seek_frame(pFormatCtx, info.video_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
1886 fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->AV_FILENAME);
1887 } else {
1888 // VIDEO SEEK
1889 is_video_seek = true;
1890 seek_worked = true;
1891 }
1892 }
1893
1894 // Seek audio stream (if the video seek failed or was skipped, and an audio stream exists)
1895 if (!seek_worked && info.has_audio) {
1896 seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);
1897 if (av_seek_frame(pFormatCtx, info.audio_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
1898 fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->AV_FILENAME);
1899 } else {
1900 // AUDIO SEEK
1901 is_video_seek = false;
1902 seek_worked = true;
1903 }
1904 }
1905
1906 // Was the seek successful?
1907 if (seek_worked) {
1908 // Flush audio buffer
1909 if (info.has_audio)
1910 avcodec_flush_buffers(aCodecCtx);
1911
1912 // Flush video buffer
1913 if (info.has_video)
1914 avcodec_flush_buffers(pCodecCtx);
1915
1916 // Reset previous audio packet location
1917 previous_packet_location.frame = -1;
1918 previous_packet_location.sample_start = 0;
1919
1920 // init seek flags
1921 is_seeking = true;
1922 if (seek_count == 1) {
1923 // Don't redefine this on multiple seek attempts for a specific frame
1924 seeking_pts = seek_target;
1925 seeking_frame = requested_frame;
1926 }
1927 seek_audio_frame_found = 0; // used to detect which frames to throw away after a seek
1928 seek_video_frame_found = 0; // used to detect which frames to throw away after a seek
1929
1930 } else {
1931 // seek failed
1932 seeking_pts = 0;
1933 seeking_frame = 0;
1934
1935 // prevent Open() from seeking again
1936 is_seeking = true;
1937
1938 // Close and re-open file (basically seeking to frame 1)
1939 Close();
1940 Open();
1941
1942 // Not actually seeking, so clear these flags
1943 is_seeking = false;
1944
1945 // disable seeking for this reader (since it failed)
1946 enable_seek = false;
1947
1948 // Update overrides (since closing and re-opening might update these)
1949 info.has_audio = has_audio_override;
1950 info.has_video = has_video_override;
1951 }
1952 }
1953}
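
// --------------------------------------------------------------------------
// Editor's example: a minimal sketch of the key-frame seek performed above,
// against a plain libavformat/libavcodec pair (names are hypothetical).
// AVSEEK_FLAG_BACKWARD lands on the nearest key-frame at or before the
// target PTS, which is why the reader must decode forward and discard
// frames after a successful seek (see IsPartialFrame below).
// --------------------------------------------------------------------------
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

static bool example_seek_to_pts(AVFormatContext *fmt_ctx, int stream_index,
                                AVCodecContext *dec_ctx, int64_t target_pts) {
	if (av_seek_frame(fmt_ctx, stream_index, target_pts,
	                  AVSEEK_FLAG_BACKWARD) < 0)
		return false;

	// Drop any decoder state buffered from before the seek point
	avcodec_flush_buffers(dec_ctx);
	return true;
}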
1954
1955// Get the PTS of the current packet (falling back to DTS when PTS is missing)
1956int64_t FFmpegReader::GetPacketPTS() {
1957 if (packet) {
1958 int64_t current_pts = packet->pts;
1959 if (current_pts == AV_NOPTS_VALUE && packet->dts != AV_NOPTS_VALUE)
1960 current_pts = packet->dts;
1961
1962 // Return the packet timestamp
1963 return current_pts;
1964 } else {
1965 // No packet, return NO PTS
1966 return AV_NOPTS_VALUE;
1967 }
1968}
1969
1970// Update PTS Offset (if any)
1971void FFmpegReader::UpdatePTSOffset() {
1972 if (pts_offset_seconds != NO_PTS_OFFSET) {
1973 // Skip this method if we have already set PTS offset
1974 return;
1975 }
1976 pts_offset_seconds = 0.0;
1977 double video_pts_offset_seconds = 0.0;
1978 double audio_pts_offset_seconds = 0.0;
1979
1980 bool has_video_pts = false;
1981 if (!info.has_video) {
1982 // Mark as checked
1983 has_video_pts = true;
1984 }
1985 bool has_audio_pts = false;
1986 if (!info.has_audio) {
1987 // Mark as checked
1988 has_audio_pts = true;
1989 }
1990
1991 // Loop through the stream (until a packet from all streams is found)
1992 while (!has_video_pts || !has_audio_pts) {
1993 // Get the next packet (if any)
1994 if (GetNextPacket() < 0)
1995 // Break loop when no more packets found
1996 break;
1997
1998 // Get PTS of this packet
1999 int64_t pts = GetPacketPTS();
2000
2001 // Video packet
2002 if (!has_video_pts && packet->stream_index == videoStream) {
2003 // Get the video packet start time (in seconds)
2004 video_pts_offset_seconds = 0.0 - (pts * info.video_timebase.ToDouble());
2005
2006 // Is timestamp close to zero (within X seconds)
2007 // Ignore wildly invalid timestamps (i.e. -234923423423)
2008 if (std::abs(video_pts_offset_seconds) <= 10.0) {
2009 has_video_pts = true;
2010 }
2011 }
2012 else if (!has_audio_pts && packet->stream_index == audioStream) {
2013 // Get the audio packet start time (in seconds)
2014 audio_pts_offset_seconds = 0.0 - (pts * info.audio_timebase.ToDouble());
2015
2016 // Is timestamp close to zero (within X seconds)
2017 // Ignore wildly invalid timestamps (i.e. -234923423423)
2018 if (std::abs(audio_pts_offset_seconds) <= 10.0) {
2019 has_audio_pts = true;
2020 }
2021 }
2022 }
2023
2024 // Do we have all valid timestamps to determine PTS offset?
2025 if (has_video_pts && has_audio_pts) {
2026 // Set PTS Offset to the smallest offset
2027 // [ video timestamp ]
2028 // [ audio timestamp ]
2029 //
2030 // ** SHIFT TIMESTAMPS TO ZERO **
2031 //
2032 //[ video timestamp ]
2033 // [ audio timestamp ]
2034 //
2035 // Since all offsets are negative at this point, we want the max value, which
2036 // represents the closest to zero
2037 pts_offset_seconds = std::max(video_pts_offset_seconds, audio_pts_offset_seconds);
2038 }
2039}
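
// Editor's worked example (illustrative numbers): suppose the first video
// packet has pts = 42042 in a 1/30000 timebase (1.4014 s) and the first
// audio packet has pts = 67200 in a 1/48000 timebase (1.4000 s). Both
// offsets are negative (-1.4014 and -1.4000), so the max is -1.4000:
//
//   pts_offset_seconds = std::max(-1.4014, -1.4000) = -1.4000
//   video now starts at 1.4014 - 1.4000 = 0.0014 s
//   audio now starts at 1.4000 - 1.4000 = 0.0000 s
//
// i.e. the stream closest to zero is shifted exactly onto zero, and the
// other stream keeps its small relative lead.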
2040
2041// Convert PTS into Frame Number
2042int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {
2043 // Remember the expected video frame # before updating it
2044 int64_t previous_video_frame = current_video_frame;
2045
2046 // Get the video packet start time (in seconds)
2047 double video_seconds = (double(pts) * info.video_timebase.ToDouble()) + pts_offset_seconds;
2048
2049 // Multiply by the FPS to get the (1-based) video frame number, rounded to the nearest frame
2050 int64_t frame = round(video_seconds * info.fps.ToDouble()) + 1;
2051
2052 // Keep track of the expected video frame #
2053 if (current_video_frame == 0)
2054 current_video_frame = frame;
2055 else {
2056
2057 // Sometimes frames are duplicated due to identical (or similar) timestamps
2058 if (frame == previous_video_frame) {
2059 // return -1 frame number
2060 frame = -1;
2061 } else {
2062 // Increment expected frame
2063 current_video_frame++;
2064 }
2065 }
2066
2067 // Return frame #
2068 return frame;
2069}
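
// Editor's worked example (illustrative numbers): with a 1/15360 video
// timebase, 24 fps and pts_offset_seconds = 0, a packet with pts = 30720
// maps to:
//
//   video_seconds = 30720 * (1/15360)    = 2.0
//   frame         = round(2.0 * 24) + 1  = 49
//
// so the packet that starts exactly 2 seconds in becomes frame 49 (1-based).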
2070
2071// Convert Frame Number into Video PTS
2072int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number) {
2073 // Get timestamp of this frame (in seconds)
2074 double seconds = (double(frame_number - 1) / info.fps.ToDouble()) + pts_offset_seconds;
2075
2076 // Convert seconds into the video stream's timebase units
2077 int64_t video_pts = round(seconds / info.video_timebase.ToDouble());
2078
2079 // Return the video PTS
2080 return video_pts;
2081}
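
// Editor's worked example (the inverse mapping, same illustrative numbers):
// frame 49 at 24 fps with pts_offset_seconds = 0 gives
//
//   seconds   = (49 - 1) / 24          = 2.0
//   video_pts = round(2.0 / (1/15360)) = 30720
//
// which round-trips with ConvertVideoPTStoFrame() above.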
2082
2084// Convert Frame Number into Audio PTS
2084int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number) {
2085 // Get timestamp of this frame (in seconds)
2086 double seconds = (double(frame_number - 1) / info.fps.ToDouble()) + pts_offset_seconds;
2087
2088 // Convert seconds into the audio stream's timebase units
2089 int64_t audio_pts = round(seconds / info.audio_timebase.ToDouble());
2090
2091 // Return the audio PTS
2092 return audio_pts;
2093}
2094
2095// Calculate Starting video frame and sample # for an audio PTS
2096AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts) {
2097 // Get the audio packet start time (in seconds)
2098 double audio_seconds = (double(pts) * info.audio_timebase.ToDouble()) + pts_offset_seconds;
2099
2100 // Multiply by the FPS to get the video frame number (frame # is decimal at this point)
2101 double frame = (audio_seconds * info.fps.ToDouble()) + 1;
2102
2103 // Frame # as a whole number (no more decimals)
2104 int64_t whole_frame = int64_t(frame);
2105
2106 // Remove the whole number, and only get the decimal of the frame
2107 double sample_start_percentage = frame - double(whole_frame);
2108
2109 // Get Samples per frame
2110 int samples_per_frame = Frame::GetSamplesPerFrame(whole_frame, info.fps, info.sample_rate, info.channels);
2111
2112 // Calculate the sample # to start on
2113 int sample_start = round(double(samples_per_frame) * sample_start_percentage);
2114
2115 // Protect against broken (i.e. negative) timestamps
2116 if (whole_frame < 1)
2117 whole_frame = 1;
2118 if (sample_start < 0)
2119 sample_start = 0;
2120
2121 // Prepare final audio packet location
2122 AudioLocation location = {whole_frame, sample_start};
2123
2124 // Compare to previous audio packet (and fix small gaps due to varying PTS timestamps)
2125 if (previous_packet_location.frame != -1) {
2126 if (location.is_near(previous_packet_location, samples_per_frame, samples_per_frame)) {
2127 int64_t orig_frame = location.frame;
2128 int orig_start = location.sample_start;
2129
2130 // Update sample start, to prevent gaps in audio
2131 location.sample_start = previous_packet_location.sample_start;
2132 location.frame = previous_packet_location.frame;
2133
2134 // Debug output
2135 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Detected)", "Source Frame", orig_frame, "Source Audio Sample", orig_start, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts);
2136
2137 } else {
2138 // Debug output
2139 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Ignored - too big)", "Previous location frame", previous_packet_location.frame, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts);
2140 }
2141 }
2142
2143 // Set previous location
2144 previous_packet_location = location;
2145
2146 // Return the associated video frame and starting sample #
2147 return location;
2148}
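
// Editor's worked example (illustrative numbers): an audio packet with
// pts = 73000 in a 1/48000 timebase, at 24 fps / 48000 Hz and
// pts_offset_seconds = 0:
//
//   audio_seconds     = 73000 / 48000     = 1.52083
//   frame             = 1.52083 * 24 + 1  = 37.5
//   whole_frame       = 37 (fraction 0.5)
//   samples_per_frame = 48000 / 24        = 2000
//   sample_start      = round(2000 * 0.5) = 1000
//
// so this packet begins at sample 1000 of video frame 37.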
2149
2150// Create a new Frame (or return an existing one) and add it to the working queue.
2151std::shared_ptr<Frame> FFmpegReader::CreateFrame(int64_t requested_frame) {
2152 // Check working cache
2153 std::shared_ptr<Frame> output = working_cache.GetFrame(requested_frame);
2154
2155 if (!output) {
2156 // (re-)Check working cache, in case another thread just added this frame
2157 output = working_cache.GetFrame(requested_frame);
2158 if(output) return output;
2159
2160 // Create a new frame on the working cache
2161 output = std::make_shared<Frame>(requested_frame, info.width, info.height, "#000000", Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels), info.channels);
2162 output->SetPixelRatio(info.pixel_ratio.num, info.pixel_ratio.den); // update pixel ratio
2163 output->ChannelsLayout(info.channel_layout); // update audio channel layout from the parent reader
2164 output->SampleRate(info.sample_rate); // update the frame's sample rate of the parent reader
2165
2166 working_cache.Add(output);
2167
2168 // Set the largest processed frame (if this is larger)
2169 if (requested_frame > largest_frame_processed)
2170 largest_frame_processed = requested_frame;
2171 }
2172 // Return frame
2173 return output;
2174}
2175
2176// Determine if frame is partial due to seek
2177bool FFmpegReader::IsPartialFrame(int64_t requested_frame) {
2178
2179 // Sometimes a seek gets partial frames, and we need to remove them
2180 bool seek_trash = false;
2181 int64_t max_seeked_frame = seek_audio_frame_found; // determine max seeked frame
2182 if (seek_video_frame_found > max_seeked_frame) {
2183 max_seeked_frame = seek_video_frame_found;
2184 }
2185 if ((info.has_audio && seek_audio_frame_found && max_seeked_frame >= requested_frame) ||
2186 (info.has_video && seek_video_frame_found && max_seeked_frame >= requested_frame)) {
2187 seek_trash = true;
2188 }
2189
2190 return seek_trash;
2191}
2192
2193// Check the working queue, and move finished frames to the finished queue
2194void FFmpegReader::CheckWorkingFrames(int64_t requested_frame) {
2195
2196 // Prevent async calls to the following code
2197 const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
2198
2199 // Get a list of current working queue frames in the cache (in-progress frames)
2200 std::vector<std::shared_ptr<openshot::Frame>> working_frames = working_cache.GetFrames();
2201 std::vector<std::shared_ptr<openshot::Frame>>::iterator working_itr;
2202
2203 // Loop through all working queue frames (sorted by frame #)
2204 for(working_itr = working_frames.begin(); working_itr != working_frames.end(); ++working_itr)
2205 {
2206 // Get working frame
2207 std::shared_ptr<Frame> f = *working_itr;
2208
2209 // Was a frame found? Is frame requested yet?
2210 if (!f || f->number > requested_frame) {
2211 // If not, skip to next one
2212 continue;
2213 }
2214
2215 // Calculate PTS in seconds (of working frame), and the most recent processed pts value
2216 double frame_pts_seconds = (double(f->number - 1) / info.fps.ToDouble()) + pts_offset_seconds;
2217 double recent_pts_seconds = std::max(video_pts_seconds, audio_pts_seconds);
2218
2219 // Determine if video and audio are ready (based on timestamps)
2220 bool is_video_ready = false;
2221 bool is_audio_ready = false;
2222 double recent_pts_diff = recent_pts_seconds - frame_pts_seconds;
2223 if ((frame_pts_seconds <= video_pts_seconds)
2224 || (recent_pts_diff > 1.5)
2225 || packet_status.video_eof || packet_status.end_of_file) {
2226 // Video stream is past this frame (so it must be done)
2227 // OR video stream is too far behind, missing, or end-of-file
2228 is_video_ready = true;
2229 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (video ready)",
2230 "frame_number", f->number,
2231 "frame_pts_seconds", frame_pts_seconds,
2232 "video_pts_seconds", video_pts_seconds,
2233 "recent_pts_diff", recent_pts_diff);
2234 if (info.has_video && !f->has_image_data) {
2235 // Frame has no image data (copy from previous frame)
2236 // Loop backwards through final frames (looking for the nearest, previous frame image)
2237 for (int64_t previous_frame = requested_frame - 1; previous_frame > 0; previous_frame--) {
2238 std::shared_ptr<Frame> previous_frame_instance = final_cache.GetFrame(previous_frame);
2239 if (previous_frame_instance && previous_frame_instance->has_image_data) {
2240 // Copy image from last decoded frame
2241 f->AddImage(std::make_shared<QImage>(previous_frame_instance->GetImage()->copy()));
2242 break;
2243 }
2244 }
2245
2246 if (last_video_frame && !f->has_image_data) {
2247 // Copy image from last decoded frame
2248 f->AddImage(std::make_shared<QImage>(last_video_frame->GetImage()->copy()));
2249 } else if (!f->has_image_data) {
2250 f->AddColor("#000000");
2251 }
2252 }
2253 }
2254
2255 double audio_pts_diff = audio_pts_seconds - frame_pts_seconds;
2256 if ((frame_pts_seconds < audio_pts_seconds && audio_pts_diff > 1.0)
2257 || (recent_pts_diff > 1.5)
2258 || packet_status.audio_eof || packet_status.end_of_file) {
2259 // Audio stream is past this frame (so it must be done)
2260 // OR audio stream is too far behind, missing, or end-of-file
2261 // Adding a bit of margin here, to allow for partial audio packets
2262 is_audio_ready = true;
2263 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (audio ready)",
2264 "frame_number", f->number,
2265 "frame_pts_seconds", frame_pts_seconds,
2266 "audio_pts_seconds", audio_pts_seconds,
2267 "audio_pts_diff", audio_pts_diff,
2268 "recent_pts_diff", recent_pts_diff);
2269 }
2270 bool is_seek_trash = IsPartialFrame(f->number);
2271
2272 // Adjust for available streams
2273 if (!info.has_video) is_video_ready = true;
2274 if (!info.has_audio) is_audio_ready = true;
2275
2276 // Debug output
2277 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames",
2278 "frame_number", f->number,
2279 "is_video_ready", is_video_ready,
2280 "is_audio_ready", is_audio_ready,
2281 "video_eof", packet_status.video_eof,
2282 "audio_eof", packet_status.audio_eof,
2283 "end_of_file", packet_status.end_of_file);
2284
2285 // Check if working frame is final
2286 if ((!packet_status.end_of_file && is_video_ready && is_audio_ready) || packet_status.end_of_file || is_seek_trash) {
2287 // Debug output
2288 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (mark frame as final)",
2289 "requested_frame", requested_frame,
2290 "f->number", f->number,
2291 "is_seek_trash", is_seek_trash,
2292 "Working Cache Count", working_cache.Count(),
2293 "Final Cache Count", final_cache.Count(),
2294 "end_of_file", packet_status.end_of_file);
2295
2296 if (!is_seek_trash) {
2297 // Move frame to final cache
2298 final_cache.Add(f);
2299
2300 // Remove frame from working cache
2301 working_cache.Remove(f->number);
2302
2303 // Update last frame processed
2304 last_frame = f->number;
2305 } else {
2306 // Seek trash, so delete the frame from the working cache, and never add it to the final cache.
2307 working_cache.Remove(f->number);
2308 }
2309
2310 }
2311 }
2312
2313 // Clear vector of frames
2314 working_frames.clear();
2315 working_frames.shrink_to_fit();
2316}
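
// --------------------------------------------------------------------------
// Editor's example: a condensed sketch of the readiness test above
// (illustrative only, not the reader's actual code path). A working frame
// is promoted to the final cache once every stream that exists has either
// decoded past the frame's timestamp, fallen far enough behind to be
// considered stalled, or reached end-of-file.
// --------------------------------------------------------------------------
#include <algorithm> // std::max

static bool example_frame_is_ready(double frame_pts_seconds,
                                   double video_pts_seconds,
                                   double audio_pts_seconds,
                                   bool video_eof, bool audio_eof,
                                   bool has_video, bool has_audio) {
	double recent = std::max(video_pts_seconds, audio_pts_seconds);
	bool stalled = (recent - frame_pts_seconds) > 1.5; // stream behind/missing

	bool video_ready = !has_video || video_eof || stalled
	                   || frame_pts_seconds <= video_pts_seconds;
	// Audio needs ~1 second of margin to allow for partial audio packets
	bool audio_ready = !has_audio || audio_eof || stalled
	                   || (audio_pts_seconds - frame_pts_seconds) > 1.0;
	return video_ready && audio_ready;
}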
2317
2318// Check for the correct frames per second (FPS) value by scanning the 1st few seconds of video packets.
2319void FFmpegReader::CheckFPS() {
2320 if (check_fps) {
2321 // Do not check FPS more than 1 time
2322 return;
2323 } else {
2324 check_fps = true;
2325 }
2326
2327 int frames_per_second[3] = {0,0,0};
2328 int max_fps_index = sizeof(frames_per_second) / sizeof(frames_per_second[0]);
2329 int fps_index = 0;
2330
2331 int all_frames_detected = 0;
2332 int starting_frames_detected = 0;
2333
2334 // Loop through the stream
2335 while (true) {
2336 // Get the next packet (if any)
2337 if (GetNextPacket() < 0)
2338 // Break loop when no more packets found
2339 break;
2340
2341 // Video packet
2342 if (packet->stream_index == videoStream) {
2343 // Get the video packet start time (in seconds)
2344 double video_seconds = (double(GetPacketPTS()) * info.video_timebase.ToDouble()) + pts_offset_seconds;
2345 fps_index = int(video_seconds); // truncate float timestamp to int (second 1, second 2, second 3)
2346
2347 // Is this video packet from the first few seconds?
2348 if (fps_index >= 0 && fps_index < max_fps_index) {
2349 // Yes, keep track of how many frames per second (over the first few seconds)
2350 starting_frames_detected++;
2351 frames_per_second[fps_index]++;
2352 }
2353
2354 // Track all video packets detected
2355 all_frames_detected++;
2356 }
2357 }
2358
2359 // Calculate FPS (based on the first few seconds of video packets)
2360 float avg_fps = 30.0;
2361 if (starting_frames_detected > 0 && fps_index > 0) {
2362 avg_fps = float(starting_frames_detected) / std::min(fps_index, max_fps_index);
2363 }
2364
2365 // Verify average FPS is a reasonable value
2366 if (avg_fps < 8.0) {
2367 // Invalid FPS assumed, so switching to a sane default FPS instead
2368 avg_fps = 30.0;
2369 }
2370
2371 // Update FPS (truncate average FPS to Integer)
2372 info.fps = Fraction(int(avg_fps), 1);
2373
2374 // Update Duration and Length
2375 if (all_frames_detected > 0) {
2376 // Use all video frames detected to calculate # of frames
2377 info.video_length = all_frames_detected;
2378 info.duration = all_frames_detected / avg_fps;
2379 } else {
2380 // Use previous duration to calculate # of frames
2381 info.video_length = info.duration * avg_fps;
2382 }
2383
2384 // Update video bit rate
2385 info.video_bit_rate = info.file_size / info.duration;
2386}
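
// Editor's worked example (illustrative numbers): for a ~30 fps stream
// longer than three seconds, roughly 90 video packets land in seconds 0-2:
//
//   starting_frames_detected = 90
//   avg_fps = 90 / min(fps_index, 3) = 90 / 3 = 30  ->  info.fps = 30/1
//
// A result below 8 fps is treated as a mis-detection and falls back to the
// 30 fps default above.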
2387
2388// Remove AVFrame from cache (and deallocate its memory)
2389void FFmpegReader::RemoveAVFrame(AVFrame *remove_frame) {
2390 // Remove pFrame (if exists)
2391 if (remove_frame) {
2392 // Free memory
2393 av_freep(&remove_frame->data[0]);
2394#ifndef WIN32
2395 AV_FREE_FRAME(&remove_frame);
2396#endif
2397 }
2398}
2399
2400// Remove AVPacket from cache (and deallocate its memory)
2401void FFmpegReader::RemoveAVPacket(AVPacket *remove_packet) {
2402 // deallocate memory for packet
2403 AV_FREE_PACKET(remove_packet);
2404
2405 // Delete the object
2406 delete remove_packet;
2407}
2408
2409// Generate JSON string of this object
2410std::string FFmpegReader::Json() const {
2411
2412 // Return formatted string
2413 return JsonValue().toStyledString();
2414}
2415
2416// Generate Json::Value for this object
2417Json::Value FFmpegReader::JsonValue() const {
2418
2419 // Create root json object
2420 Json::Value root = ReaderBase::JsonValue(); // get parent properties
2421 root["type"] = "FFmpegReader";
2422 root["path"] = path;
2423
2424 // return JsonValue
2425 return root;
2426}
2427
2428// Load JSON string into this object
2429void FFmpegReader::SetJson(const std::string value) {
2430
2431 // Parse JSON string into JSON objects
2432 try {
2433 const Json::Value root = openshot::stringToJson(value);
2434 // Set all values that match
2435 SetJsonValue(root);
2436 }
2437 catch (const std::exception& e) {
2438 // Error parsing JSON (or missing keys)
2439 throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
2440 }
2441}
2442
2443// Load Json::Value into this object
2444void FFmpegReader::SetJsonValue(const Json::Value root) {
2445
2446 // Set parent data
2447 ReaderBase::SetJsonValue(root);
2448
2449 // Set data from Json (if key is found)
2450 if (!root["path"].isNull())
2451 path = root["path"].asString();
2452
2453 // Re-Open path, and re-init everything (if needed)
2454 if (is_open) {
2455 Close();
2456 Open();
2457 }
2458}
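
// Editor's usage sketch (illustrative) of the JSON round-trip above:
//
//   openshot::FFmpegReader r("input.mp4");
//   std::string state = r.Json();  // {"type":"FFmpegReader","path":...}
//   r.SetJson(state);              // re-applies the path (re-opens if open)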