diff --git a/src/AvTranscoder/data/decoded/Frame.cpp b/src/AvTranscoder/data/decoded/Frame.cpp index 68491b24..655776c9 100644 --- a/src/AvTranscoder/data/decoded/Frame.cpp +++ b/src/AvTranscoder/data/decoded/Frame.cpp @@ -40,6 +40,11 @@ Frame::~Frame() } } +int Frame::getEncodedSize() const +{ + return av_frame_get_pkt_size(_frame); +} + void Frame::copyData(const Frame& frameToRef) { const int ret = av_frame_copy(_frame, &frameToRef.getAVFrame()); diff --git a/src/AvTranscoder/data/decoded/Frame.hpp b/src/AvTranscoder/data/decoded/Frame.hpp index aa3e45b1..482c92ff 100644 --- a/src/AvTranscoder/data/decoded/Frame.hpp +++ b/src/AvTranscoder/data/decoded/Frame.hpp @@ -43,6 +43,12 @@ class AvExport Frame */ int* getLineSize() const { return _frame->linesize; } + /** + * @return Size of the corresponding packet containing the compressed frame (in bytes) + * @warning returns a negative value if the size is unknown + */ + int getEncodedSize() const; + /** * @brief Copy the data of the given Frame. * @note This function does not allocate anything: the current frame must be already initialized and diff --git a/src/AvTranscoder/file/FormatContext.cpp b/src/AvTranscoder/file/FormatContext.cpp index f15fa69d..44a7c0ca 100644 --- a/src/AvTranscoder/file/FormatContext.cpp +++ b/src/AvTranscoder/file/FormatContext.cpp @@ -53,7 +53,7 @@ FormatContext::~FormatContext() void FormatContext::findStreamInfo(AVDictionary** options) { - int err = avformat_find_stream_info(_avFormatContext, options); + const int err = avformat_find_stream_info(_avFormatContext, options); if(err < 0) { throw std::ios_base::failure("Unable to find stream informations: " + getDescriptionFromErrorCode(err)); @@ -65,7 +65,7 @@ void FormatContext::openRessource(const std::string& url, int flags) if((_avFormatContext->flags & AVFMT_NOFILE) == AVFMT_NOFILE) return; - int err = avio_open2(&_avFormatContext->pb, url.c_str(), flags, NULL, NULL); + const int err = avio_open2(&_avFormatContext->pb, url.c_str(), flags, NULL, NULL); if(err < 0) { throw std::ios_base::failure("Error when opening output format: " + getDescriptionFromErrorCode(err)); @@ -77,7 +77,7 @@ void FormatContext::closeRessource() if((_avFormatContext->flags & AVFMT_NOFILE) == AVFMT_NOFILE) return; - int err = avio_close(_avFormatContext->pb); + const int err = avio_close(_avFormatContext->pb); if(err < 0) { throw std::ios_base::failure("Error when close output format: " + getDescriptionFromErrorCode(err)); @@ -86,7 +86,7 @@ void FormatContext::closeRessource() void FormatContext::writeHeader(AVDictionary** options) { - int ret = avformat_write_header(_avFormatContext, options); + const int ret = avformat_write_header(_avFormatContext, options); if(ret != 0) { throw std::runtime_error("Could not write header: " + getDescriptionFromErrorCode(ret)); @@ -115,7 +115,7 @@ void FormatContext::writeFrame(AVPacket& packet, bool interleaved) void FormatContext::writeTrailer() { - int ret = av_write_trailer(_avFormatContext); + const int ret = av_write_trailer(_avFormatContext); if(ret != 0) { throw std::runtime_error("Could not write trailer: " + getDescriptionFromErrorCode(ret)); @@ -124,7 +124,7 @@ void FormatContext::writeTrailer() void FormatContext::addMetaData(const std::string& key, const std::string& value) { - int ret = av_dict_set(&_avFormatContext->metadata, key.c_str(), value.c_str(), 0); + const int ret = av_dict_set(&_avFormatContext->metadata, key.c_str(), value.c_str(), 0); if(ret < 0) { LOG_ERROR(getDescriptionFromErrorCode(ret)) @@ -144,8 +144,18 @@ AVStream& 
FormatContext::addAVStream(const AVCodec& avCodec) bool FormatContext::seek(const uint64_t position, const int flag) { - LOG_INFO("Seek in '" << _avFormatContext->filename << "' at " << position << " (in AV_TIME_BASE units)") - int err = av_seek_frame(_avFormatContext, -1, position, flag); + // Check if the format is a raw bitstream, without any container. + // In this case, avoid seeking. + const std::string formatLongName(_avFormatContext->iformat->long_name ? _avFormatContext->iformat->long_name : ""); + const std::size_t rawIndex = formatLongName.find("raw"); + if(rawIndex != std::string::npos) + { + LOG_WARN("Seek in '" << _avFormatContext->filename << "' is not possible since this is a raw bitstream without access to timing information.") + return false; + } + + LOG_INFO("Seek in '" << _avFormatContext->filename << "' at " << position << " with flag '" << flag << "'") + const int err = av_seek_frame(_avFormatContext, -1, position, flag); if(err < 0) { LOG_ERROR("Error when seek at " << position << " (in AV_TIME_BASE units) in file") diff --git a/src/AvTranscoder/file/FormatContext.hpp b/src/AvTranscoder/file/FormatContext.hpp index 2e885f32..dd73e527 100644 --- a/src/AvTranscoder/file/FormatContext.hpp +++ b/src/AvTranscoder/file/FormatContext.hpp @@ -78,6 +78,7 @@ class AvExport FormatContext * @param position: can be in AV_TIME_BASE units, in frames... depending on the flag value * @param flag: seeking mode (AVSEEK_FLAG_xxx) * @return seek status + * @warning No seek is done if the format is a raw bitstream * @see flushDecoder */ bool seek(const uint64_t position, const int flag); diff --git a/src/AvTranscoder/properties/FileProperties.cpp b/src/AvTranscoder/properties/FileProperties.cpp index c18083fe..7cf4469c 100644 --- a/src/AvTranscoder/properties/FileProperties.cpp +++ b/src/AvTranscoder/properties/FileProperties.cpp @@ -31,7 +31,7 @@ void FileProperties::extractStreamProperties(IProgress& progress, const EAnalyse { clearStreamProperties(); - // if the analysis level wiil decode some streams parts, seek at the beginning + // Seek to the first key frame before the analysis if(level > eAnalyseLevelHeader) const_cast<FormatContext*>(_formatContext)->seek(0, AVSEEK_FLAG_BACKWARD); @@ -119,7 +119,7 @@ void FileProperties::extractStreamProperties(IProgress& progress, const EAnalyse _streams[unknownStreamIndex] = &_unknownStreams.at(streamIndex); } - // if the analysis level has decoded some streams parts, return at the beginning + // Seek back to the first key frame after the analysis if(level > eAnalyseLevelHeader) const_cast<FormatContext*>(_formatContext)->seek(0, AVSEEK_FLAG_BACKWARD); } @@ -148,15 +148,10 @@ std::string FileProperties::getFormatLongName() const std::string FileProperties::getFormatMimeType() const { #if LIBAVFORMAT_VERSION_MAJOR <= 55 - LOG_WARN("Cannot get mime type format of '" << getFilename() - << "' because your libavformat library has a major version <= 55.") - return "not available"; + throw std::runtime_error("Cannot get the format mime type: the libavformat library has a major version <= 55."); #else if(_avFormatContext->iformat->mime_type == NULL) - { - LOG_WARN("Unknown demuxer format mime type of '" << getFilename() << "'.") - return ""; - } + throw std::runtime_error("Unknown demuxer format mime type"); return std::string(_avFormatContext->iformat->mime_type); #endif } @@ -235,6 +230,7 @@ PropertyVector& FileProperties::fillVector(PropertyVector& data) const addProperty(data, "filename", &FileProperties::getFilename); addProperty(data, "formatName", &FileProperties::getFormatName); addProperty(data,
"formatLongName", &FileProperties::getFormatLongName); + addProperty(data, "mimeType", &FileProperties::getFormatMimeType); addProperty(data, "startTime", &FileProperties::getStartTime); addProperty(data, "duration", &FileProperties::getDuration); diff --git a/src/AvTranscoder/properties/PixelProperties.cpp b/src/AvTranscoder/properties/PixelProperties.cpp index 2f104421..bef52cd4 100644 --- a/src/AvTranscoder/properties/PixelProperties.cpp +++ b/src/AvTranscoder/properties/PixelProperties.cpp @@ -274,7 +274,7 @@ PropertyVector& PixelProperties::fillVector(PropertyVector& data) const } catch(const std::exception& e) { - detail::add(data, "colorComponents", e.what()); + detail::add(data, "colorComponents", detail::propertyValueIfError); } try diff --git a/src/AvTranscoder/properties/StreamProperties.cpp b/src/AvTranscoder/properties/StreamProperties.cpp index 1bc28f31..23e00bb2 100644 --- a/src/AvTranscoder/properties/StreamProperties.cpp +++ b/src/AvTranscoder/properties/StreamProperties.cpp @@ -53,7 +53,10 @@ Rational StreamProperties::getTimeBase() const float StreamProperties::getDuration() const { const Rational timeBase = getTimeBase(); - return av_q2d(timeBase) * _formatContext->streams[_streamIndex]->duration; + const size_t streamDurationInStreamTimeBase = _formatContext->streams[_streamIndex]->duration; + if(streamDurationInStreamTimeBase == (size_t)AV_NOPTS_VALUE) + throw std::runtime_error("unknown stream duration"); + return av_q2d(timeBase) * streamDurationInStreamTimeBase; } AVMediaType StreamProperties::getStreamType() const diff --git a/src/AvTranscoder/properties/StreamProperties.hpp b/src/AvTranscoder/properties/StreamProperties.hpp index f3c68e04..df79cf8f 100644 --- a/src/AvTranscoder/properties/StreamProperties.hpp +++ b/src/AvTranscoder/properties/StreamProperties.hpp @@ -18,6 +18,13 @@ class AvExport StreamProperties size_t getStreamIndex() const { return _streamIndex; } size_t getStreamId() const; Rational getTimeBase() const; + + /** + * @return duration of the stream in seconds + * @throw runtime_error if the duration is unknown + * @note If a source file does not specify a duration, but does specify + * a bitrate, this value will be estimated from bitrate and file size. 
+ */ float getDuration() const; ///< in seconds AVMediaType getStreamType() const; diff --git a/src/AvTranscoder/properties/VideoProperties.cpp b/src/AvTranscoder/properties/VideoProperties.cpp index 0dd86102..e2e56c1e 100644 --- a/src/AvTranscoder/properties/VideoProperties.cpp +++ b/src/AvTranscoder/properties/VideoProperties.cpp @@ -1,5 +1,6 @@ #include "VideoProperties.hpp" +#include #include #include @@ -19,9 +20,11 @@ namespace avtranscoder VideoProperties::VideoProperties(const FormatContext& formatContext, const size_t index, IProgress& progress, const EAnalyseLevel level) : StreamProperties(formatContext, index) + , _levelAnalysis(level) , _pixelProperties() , _isInterlaced(false) , _isTopFieldFirst(false) + , _gopSize(0) , _gopStructure() , _firstGopTimeCode(-1) { @@ -31,7 +34,7 @@ VideoProperties::VideoProperties(const FormatContext& formatContext, const size_ _firstGopTimeCode = _codecContext->timecode_frame_start; } - if(level == eAnalyseLevelFirstGop) + if(_levelAnalysis == eAnalyseLevelFirstGop) analyseGopStructure(progress); } @@ -43,11 +46,8 @@ std::string VideoProperties::getProfileName() const if(_codec->capabilities & CODEC_CAP_TRUNCATED) _codecContext->flags |= CODEC_FLAG_TRUNCATED; - if(_codecContext->profile == -99) - throw std::runtime_error("unknown codec profile"); - const char* profile = NULL; - if((profile = av_get_profile_name(_codec, _codecContext->profile)) == NULL) + if((profile = av_get_profile_name(_codec, getProfile())) == NULL) throw std::runtime_error("unknown codec profile"); return std::string(profile); @@ -333,31 +333,31 @@ size_t VideoProperties::getBitRate() const if(!_codecContext->width || !_codecContext->height) throw std::runtime_error("cannot compute bit rate: invalid frame size"); + // Needed to get the gop size + if(_levelAnalysis < eAnalyseLevelFirstGop) + throw std::runtime_error("cannot compute bit rate: need to get info from the first gop (see eAnalyseLevelFirstGop)"); + // discard no frame type when decode _codecContext->skip_frame = AVDISCARD_NONE; -#if LIBAVCODEC_VERSION_MAJOR > 54 - AVFrame* frame = av_frame_alloc(); -#else - AVFrame* frame = avcodec_alloc_frame(); -#endif + Frame frame; AVPacket pkt; av_init_packet(&pkt); avcodec_open2(_codecContext, _codec, NULL); int gotFrame = 0; - int count = 0; + size_t count = 0; int gopFramesSize = 0; while(!av_read_frame(const_cast<AVFormatContext*>(_formatContext), &pkt)) { if(pkt.stream_index == (int)_streamIndex) { - avcodec_decode_video2(_codecContext, frame, &gotFrame, &pkt); + avcodec_decode_video2(_codecContext, &frame.getAVFrame(), &gotFrame, &pkt); if(gotFrame) { #if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54, 7, 100) - gopFramesSize += av_frame_get_pkt_size(frame); + gopFramesSize += frame.getEncodedSize(); #else gopFramesSize += pkt.size; #endif @@ -365,19 +365,12 @@ size_t VideoProperties::getBitRate() const } } av_free_packet(&pkt); - if(_codecContext->gop_size == count) + if(getGopSize() == count) break; } -#if LIBAVCODEC_VERSION_MAJOR > 54 - av_frame_free(&frame); -#elif LIBAVCODEC_VERSION_MAJOR > 53 - avcodec_free_frame(&frame); -#else - av_free(frame); -#endif int bitsPerByte = 8; - return (gopFramesSize / _codecContext->gop_size) * bitsPerByte * getFps(); + return (gopFramesSize / getGopSize()) * bitsPerByte * getFps(); } size_t VideoProperties::getMaxBitRate() const @@ -425,13 +418,6 @@ size_t VideoProperties::getHeight() const return _codecContext->height; } -size_t VideoProperties::getGopSize() const -{ - if(!_codecContext) - throw std::runtime_error("unknown codec context"); - return
_codecContext->gop_size; -} - size_t VideoProperties::getDtgActiveFormat() const { if(!_codecContext) @@ -493,61 +479,55 @@ void VideoProperties::analyseGopStructure(IProgress& progress) AVPacket pkt; av_init_packet(&pkt); -// Allocate frame -#if LIBAVCODEC_VERSION_MAJOR > 54 - AVFrame* frame = av_frame_alloc(); -#else - AVFrame* frame = avcodec_alloc_frame(); -#endif - // Initialize the AVCodecContext to use the given AVCodec avcodec_open2(_codecContext, _codec, NULL); - int count = 0; + Frame frame; + size_t count = 0; int gotFrame = 0; - bool stopAnalyse = false; + int positionOfFirstKeyFrame = -1; + int positionOfLastKeyFrame = -1; while(!av_read_frame(const_cast<AVFormatContext*>(_formatContext), &pkt)) { if(pkt.stream_index == (int)_streamIndex) { - avcodec_decode_video2(_codecContext, frame, &gotFrame, &pkt); + avcodec_decode_video2(_codecContext, &frame.getAVFrame(), &gotFrame, &pkt); if(gotFrame) { + AVFrame& avFrame = frame.getAVFrame(); + _gopStructure.push_back( - std::make_pair(av_get_picture_type_char(frame->pict_type), frame->key_frame)); - _isInterlaced = frame->interlaced_frame; - _isTopFieldFirst = frame->top_field_first; + std::make_pair(av_get_picture_type_char(avFrame.pict_type), frame.getEncodedSize())); + _isInterlaced = avFrame.interlaced_frame; + _isTopFieldFirst = avFrame.top_field_first; + if(avFrame.pict_type == AV_PICTURE_TYPE_I) + { + if(positionOfFirstKeyFrame == -1) + positionOfFirstKeyFrame = count; + else + positionOfLastKeyFrame = count; + } + ++count; - if(progress.progress(count, _codecContext->gop_size) == eJobStatusCancel) - stopAnalyse = true; } } - av_free_packet(&pkt); - if(_codecContext->gop_size == count) + // If the first two key frames have been found + if(positionOfFirstKeyFrame != -1 && positionOfLastKeyFrame != -1) { - stopAnalyse = true; - } - - if(stopAnalyse) + // Set the GOP size as the distance between these two key frames + _gopSize = positionOfLastKeyFrame - positionOfFirstKeyFrame; + // Update the GOP structure to keep only one GOP + while(_gopStructure.size() > _gopSize) + _gopStructure.pop_back(); break; + } } // Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext itself) avcodec_close(_codecContext); - -// Free frame -#if LIBAVCODEC_VERSION_MAJOR > 54 - av_frame_free(&frame); -#else -#if LIBAVCODEC_VERSION_MAJOR > 53 - avcodec_free_frame(&frame); -#else - av_free(frame); -#endif -#endif } } } @@ -573,8 +553,6 @@ PropertyVector& VideoProperties::fillVector(PropertyVector& data) const addProperty(data, "colorRange", &VideoProperties::getColorRange); addProperty(data, "colorPrimaries", &VideoProperties::getColorPrimaries); addProperty(data, "chromaSampleLocation", &VideoProperties::getChromaSampleLocation); - addProperty(data, "interlaced ", &VideoProperties::isInterlaced); - addProperty(data, "topFieldFirst", &VideoProperties::isTopFieldFirst); addProperty(data, "fieldOrder", &VideoProperties::getFieldOrder); addProperty(data, "fps", &VideoProperties::getFps); addProperty(data, "nbFrame", &VideoProperties::getNbFrames); @@ -582,19 +560,36 @@ PropertyVector& VideoProperties::fillVector(PropertyVector& data) const addProperty(data, "bitRate", &VideoProperties::getBitRate); addProperty(data, "maxBitRate", &VideoProperties::getMaxBitRate); addProperty(data, "minBitRate", &VideoProperties::getMinBitRate); - addProperty(data, "gopSize", &VideoProperties::getGopSize); + addProperty(data, "hasBFrames", &VideoProperties::hasBFrames); + addProperty(data, "referencesFrames", &VideoProperties::getReferencesFrames); - std::string gop; -
for(size_t frameIndex = 0; frameIndex < _gopStructure.size(); ++frameIndex) + // Add properties only available when the first GOP has been decoded + if(_levelAnalysis < eAnalyseLevelFirstGop) { - gop += _gopStructure.at(frameIndex).first; - gop += " "; + detail::add(data, "gopSize", detail::propertyValueIfError); + detail::add(data, "gop", detail::propertyValueIfError); + detail::add(data, "interlaced", detail::propertyValueIfError); + detail::add(data, "topFieldFirst", detail::propertyValueIfError); } - detail::add(data, "gop", gop); - // detail::add( data, "isClosedGop", isClosedGop() ); + else + { + addProperty(data, "gopSize", &VideoProperties::getGopSize); - addProperty(data, "hasBFrames", &VideoProperties::hasBFrames); - addProperty(data, "referencesFrames", &VideoProperties::getReferencesFrames); + std::stringstream gop; + for(size_t frameIndex = 0; frameIndex < _gopStructure.size(); ++frameIndex) + { + gop << _gopStructure.at(frameIndex).first; + gop << "("; + gop << _gopStructure.at(frameIndex).second; + gop << ")"; + gop << " "; + } + detail::add(data, "gop", gop.str()); + // detail::add( data, "isClosedGop", isClosedGop() ); + + addProperty(data, "interlaced", &VideoProperties::isInterlaced); + addProperty(data, "topFieldFirst", &VideoProperties::isTopFieldFirst); + } // Add properties of the pixel PropertyVector pixelProperties; diff --git a/src/AvTranscoder/properties/VideoProperties.hpp b/src/AvTranscoder/properties/VideoProperties.hpp index bd1cc301..66ef46e7 100644 --- a/src/AvTranscoder/properties/VideoProperties.hpp +++ b/src/AvTranscoder/properties/VideoProperties.hpp @@ -47,7 +47,6 @@ class AvExport VideoProperties : public StreamProperties size_t getTicksPerFrame() const; size_t getWidth() const; size_t getHeight() const; - size_t getGopSize() const; size_t getDtgActiveFormat() const; size_t getReferencesFrames() const; int getProfile() const; @@ -71,7 +70,11 @@ class AvExport VideoProperties : public StreamProperties // @see analyseGopStructure bool isInterlaced() const { return _isInterlaced; } bool isTopFieldFirst() const { return _isTopFieldFirst; } - std::vector<std::pair<char, bool> > getGopStructure() const { return _gopStructure; } + /** + * @return the distance between the two nearest I-frames + */ + size_t getGopSize() const { return _gopSize; } + std::vector<std::pair<char, int> > getGopStructure() const { return _gopStructure; } //@} #ifndef SWIG @@ -104,12 +107,22 @@ class AvExport VideoProperties : public StreamProperties #endif private: + /** + * @brief Level of analysis requested. + */ + EAnalyseLevel _levelAnalysis; + + /** + * @brief All the pixel properties contained in this stream.
+ */ PixelProperties _pixelProperties; + //@{ // Can acces these data when analyse first gop bool _isInterlaced; bool _isTopFieldFirst; - std::vector<std::pair<char, bool> > _gopStructure; + size_t _gopSize; + std::vector<std::pair<char, int> > _gopStructure; ///< picture type, encoded frame size in bytes //@} /** diff --git a/src/AvTranscoder/properties/properties.i b/src/AvTranscoder/properties/properties.i index 00cbaeaa..0bbb720f 100644 --- a/src/AvTranscoder/properties/properties.i +++ b/src/AvTranscoder/properties/properties.i @@ -40,8 +40,8 @@ namespace std { %template(PropertyPair) pair< string, string >; %template(PropertyVector) vector< pair< string, string > >; -%template(GopPair) pair< char, bool >; -%template(GopVector) vector< pair< char, bool > >; +%template(GopPair) pair< char, int >; +%template(GopVector) vector< pair< char, int > >; %template(ChannelVector) vector< avtranscoder::Channel >; } diff --git a/test/pyTest/testInputFile.py b/test/pyTest/testInputFile.py index d9b4f247..3fdadc60 100644 --- a/test/pyTest/testInputFile.py +++ b/test/pyTest/testInputFile.py @@ -31,6 +31,7 @@ def testInputFileAnalyseFirstGop(): videoProperties = inputFile.getProperties().getVideoProperties()[0] assert_equals(videoProperties.isInterlaced(), False) assert_equals(videoProperties.isTopFieldFirst(), False) + assert_equals(videoProperties.getGopSize(), 0) assert_equals(videoProperties.getGopStructure(), ()) # Analyse first GOP @@ -39,4 +40,10 @@ def testInputFileAnalyseFirstGop(): # Check properties after GOP analysis videoProperties = inputFile.getProperties().getVideoProperties()[0] + assert_greater(videoProperties.getGopSize(), 0) assert_not_equals(videoProperties.getGopStructure(), ()) + for image in videoProperties.getGopStructure(): + pictureType = image[0] + encodedPictureSize = image[1] + assert_in(pictureType, ['I', 'P', 'B']) + assert_greater(encodedPictureSize, 0) diff --git a/test/pyTest/testTranscoderTranscodeVideo.py b/test/pyTest/testTranscoderTranscodeVideo.py index 9bdcfb63..8f486f94 100644 --- a/test/pyTest/testTranscoderTranscodeVideo.py +++ b/test/pyTest/testTranscoderTranscodeVideo.py @@ -35,6 +35,7 @@ def testTranscodeDnxhd120(): # get dst file of transcode dst_inputFile = av.InputFile( outputFileName ) + dst_inputFile.analyse(progress, av.eAnalyseLevelFirstGop) dst_properties = dst_inputFile.getProperties() dst_videoStream = dst_properties.getVideoProperties()[0] @@ -43,7 +44,9 @@ def testTranscodeDnxhd120(): deltaBitRate = expectedBitRate * 0.05 assert_almost_equals( expectedBitRate, dst_videoStream.getBitRate(), delta=deltaBitRate ) assert_equals( "yuv422p", dst_videoStream.getPixelProperties().getPixelName() ) - # assert_equals( 1, dst_videoStream.getGopSize() ) # 1 != 12L + assert_equals( 1920, dst_videoStream.getWidth() ) + assert_equals( 1080, dst_videoStream.getHeight() ) + assert_equals( 1, dst_videoStream.getGopSize() ) def testTranscodeDnxhd185(): """ @@ -70,6 +73,7 @@ def testTranscodeDnxhd185(): # get dst file of transcode dst_inputFile = av.InputFile( outputFileName ) + dst_inputFile.analyse(progress, av.eAnalyseLevelFirstGop) dst_properties = dst_inputFile.getProperties() dst_videoStream = dst_properties.getVideoProperties()[0] @@ -78,7 +82,9 @@ def testTranscodeDnxhd185(): deltaBitRate = expectedBitRate * 0.05 assert_almost_equals( expectedBitRate, dst_videoStream.getBitRate(), delta=deltaBitRate ) assert_equals( "yuv422p", dst_videoStream.getPixelProperties().getPixelName() ) - # assert_equals( 1, dst_videoStream.getGopSize() ) # 1 != 12L + assert_equals( 1920, dst_videoStream.getWidth() ) +
assert_equals( 1080, dst_videoStream.getHeight() ) + assert_equals( 1, dst_videoStream.getGopSize() ) def testTranscodeDnxhd185x(): """ @@ -105,6 +111,7 @@ def testTranscodeDnxhd185x(): # get dst file of transcode dst_inputFile = av.InputFile( outputFileName ) + dst_inputFile.analyse(progress, av.eAnalyseLevelFirstGop) dst_properties = dst_inputFile.getProperties() dst_videoStream = dst_properties.getVideoProperties()[0] @@ -113,7 +120,9 @@ def testTranscodeDnxhd185x(): deltaBitRate = expectedBitRate * 0.05 assert_almost_equals( expectedBitRate, dst_videoStream.getBitRate(), delta=deltaBitRate ) assert_equals( "yuv422p10le", dst_videoStream.getPixelProperties().getPixelName() ) - # assert_equals( 1, dst_videoStream.getGopSize() ) # 1 != 12L + assert_equals( 1920, dst_videoStream.getWidth() ) + assert_equals( 1080, dst_videoStream.getHeight() ) + assert_equals( 1, dst_videoStream.getGopSize() ) def testTranscodeYUV420(): """