Merge pull request #407 from D4N/fix_400
Fix issue #400 (overreads in PngChunk::parseTXTChunk())
commit a93cb04361
helper_functions.cpp
@@ -28,14 +28,12 @@
 #include "helper_functions.hpp"
 
-#include <algorithm>
+#include <string.h>
 
 std::string string_from_unterminated(const char* data, size_t data_length)
 {
-    const char* termination = std::find(data, data + data_length, 0);
-    // if find returned the end iterator => no \0 found
-    const size_t string_length = termination == data + data_length ? data_length : termination - data;
+    const size_t StringLength = strnlen(data, data_length);
 
-    return std::string(data, string_length);
+    return std::string(data, StringLength);
 }
 
helper_functions.hpp
@@ -37,6 +37,13 @@
   Convert a C style string that may or may not be null terminated safely
   into a std::string. The string's termination is either set at the first \0
   or after data_length characters.
+
+  @param[in] data         A c-string from which the std::string shall be
+                          constructed. Does not need to be null terminated.
+  @param[in] data_length  An upper bound for the string length (must be at most
+                          the allocated length of `buffer`). If no null terminator is found in data,
+                          then the resulting std::string will be null terminated at `data_length`.
+
 */
 std::string string_from_unterminated(const char* data, size_t data_length);
 
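Note (illustrative, not part of the PR): the helper's contract, as documented above, can be seen in a tiny standalone C++ sketch. Only string_from_unterminated mirrors the new implementation; the demo buffers and main() are hypothetical.

// Sketch of string_from_unterminated's behaviour; demo data is hypothetical.
#include <string.h>
#include <iostream>
#include <string>

static std::string string_from_unterminated(const char* data, size_t data_length)
{
    // strnlen inspects at most data_length bytes, so it never reads past the
    // buffer, whether or not a '\0' is present.
    const size_t StringLength = strnlen(data, data_length);
    return std::string(data, StringLength);
}

int main()
{
    const char terminated[]   = {'k', 'e', 'y', '\0', 'j', 'u', 'n', 'k'};
    const char unterminated[] = {'k', 'e', 'y', 'j', 'u', 'n', 'k', '!'};

    // Stops at the embedded terminator: prints "key".
    std::cout << string_from_unterminated(terminated, sizeof(terminated)) << '\n';
    // No terminator in the window: all 8 bytes are used, nothing beyond them is read.
    std::cout << string_from_unterminated(unterminated, sizeof(unterminated)).size() << '\n';
    return 0;
}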
pngchunk_int.cpp
@@ -34,6 +34,8 @@
 #include "image.hpp"
 #include "error.hpp"
 #include "enforce.hpp"
+#include "helper_functions.hpp"
+#include "safe_op.hpp"
 
 // + standard includes
 #include <sstream>
 
@@ -133,6 +135,8 @@ namespace Exiv2 {
 
         if(type == zTXt_Chunk)
         {
+            enforce(data.size_ >= Safe::add(keysize, 2), Exiv2::kerCorruptedMetadata);
+
             // Extract a deflate compressed Latin-1 text chunk
 
             // we get the compression method after the key
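Note (illustrative): the new check follows a single pattern: validate the chunk length with overflow-safe arithmetic before forming any pointer past the keyword. The sketch below uses simplified stand-ins for Exiv2's enforce() and Safe::add(); the real helpers live in enforce.hpp and safe_op.hpp and report kerCorruptedMetadata instead of a generic exception.

// Simplified stand-ins, for illustration only.
#include <cstddef>
#include <limits>
#include <stdexcept>

static void enforce(bool condition, const char* what)
{
    // Exiv2's enforce throws Exiv2::Error(kerCorruptedMetadata) here instead.
    if (!condition) throw std::runtime_error(what);
}

static size_t safe_add(size_t a, size_t b)
{
    // Refuse to wrap around instead of producing a small bogus sum.
    enforce(a <= std::numeric_limits<size_t>::max() - b, "integer overflow");
    return a + b;
}

// zTXt payload layout: keyword (keysize bytes), '\0', compression method byte, compressed text.
// Only after this check is data + keysize + 2 known to stay inside the chunk.
static void check_zTXt_header(size_t chunk_size, size_t keysize)
{
    enforce(chunk_size >= safe_add(keysize, 2), "corrupted metadata");
}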
@@ -149,11 +153,13 @@ namespace Exiv2 {
             // compressed string after the compression technique spec
             const byte* compressedText = data.pData_ + keysize + 2;
             unsigned int compressedTextSize = data.size_ - keysize - 2;
+            enforce(compressedTextSize < data.size_, kerCorruptedMetadata);
 
             zlibUncompress(compressedText, compressedTextSize, arr);
         }
         else if(type == tEXt_Chunk)
         {
+            enforce(data.size_ >= Safe::add(keysize, 1), Exiv2::kerCorruptedMetadata);
             // Extract a non-compressed Latin-1 text chunk
 
             // the text comes after the key, but isn't null terminated
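Note (illustrative): the extra enforce on compressedTextSize guards against unsigned wrap-around. data.size_ - keysize - 2 is evaluated in unsigned arithmetic, so on a truncated chunk it would silently become a huge positive value rather than a negative one, and a wrapped result can never be smaller than data.size_ itself, which is exactly what the check rejects. With the length check added above it serves as an additional safety net. A minimal demonstration with hypothetical values:

// Unsigned wrap-around that enforce(compressedTextSize < data.size_, ...) rejects.
#include <iostream>

int main()
{
    const unsigned int size    = 4;   // hypothetical chunk size
    const unsigned int keysize = 10;  // hypothetical keyword length from a corrupted chunk

    const unsigned int compressedTextSize = size - keysize - 2;          // wraps, not -8
    std::cout << compressedTextSize << '\n';                             // 4294967288 for 32-bit unsigned int
    std::cout << std::boolalpha << (compressedTextSize < size) << '\n';  // false -> rejected
    return 0;
}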
@@ -164,6 +170,7 @@ namespace Exiv2 {
         }
         else if(type == iTXt_Chunk)
         {
+            enforce(data.size_ >= Safe::add(keysize, 3), Exiv2::kerCorruptedMetadata);
             const int nullSeparators = std::count(&data.pData_[keysize+3], &data.pData_[data.size_], '\0');
             enforce(nullSeparators >= 2, Exiv2::kerCorruptedMetadata);
 
@@ -178,41 +185,45 @@ namespace Exiv2 {
             enforce(compressionMethod == 0x00, Exiv2::kerCorruptedMetadata);
 
             // language description string after the compression technique spec
-            std::string languageText((const char*)(data.pData_ + keysize + 3));
-            unsigned int languageTextSize = static_cast<unsigned int>(languageText.size());
+            const size_t languageTextMaxSize = data.size_ - keysize - 3;
+            std::string languageText =
+                string_from_unterminated((const char*)(data.pData_ + Safe::add(keysize, 3)), languageTextMaxSize);
+            const unsigned int languageTextSize = static_cast<unsigned int>(languageText.size());
 
+            enforce(data.size_ >= Safe::add(static_cast<unsigned int>(Safe::add(keysize, 4)), languageTextSize),
+                    Exiv2::kerCorruptedMetadata);
             // translated keyword string after the language description
-            std::string translatedKeyText((const char*)(data.pData_ + keysize + 3 + languageTextSize +1));
-            unsigned int translatedKeyTextSize = static_cast<unsigned int>(translatedKeyText.size());
+            std::string translatedKeyText =
+                string_from_unterminated((const char*)(data.pData_ + keysize + 3 + languageTextSize + 1),
+                                         data.size_ - (keysize + 3 + languageTextSize + 1));
+            const unsigned int translatedKeyTextSize = static_cast<unsigned int>(translatedKeyText.size());
 
-            if ( compressionFlag == 0x00 )
-            {
-                // then it's an uncompressed iTXt chunk
-#ifdef DEBUG
-                std::cout << "Exiv2::PngChunk::parseTXTChunk: We found an uncompressed iTXt field\n";
-#endif
+            if ((compressionFlag == 0x00) || (compressionFlag == 0x01 && compressionMethod == 0x00)) {
+                enforce(Safe::add(static_cast<unsigned int>(keysize + 3 + languageTextSize + 1),
+                                  Safe::add(translatedKeyTextSize, 1u)) <= data.size_,
+                        Exiv2::kerCorruptedMetadata);
 
                 // the text comes after the translated keyword, but isn't null terminated
                 const byte* text = data.pData_ + keysize + 3 + languageTextSize + 1 + translatedKeyTextSize + 1;
-                long textsize = data.size_ - (keysize + 3 + languageTextSize + 1 + translatedKeyTextSize + 1);
+                const long textsize = data.size_ - (keysize + 3 + languageTextSize + 1 + translatedKeyTextSize + 1);
 
-                arr.alloc(textsize);
-                arr = DataBuf(text, textsize);
-            }
-            else if ( compressionFlag == 0x01 && compressionMethod == 0x00 )
-            {
-                // then it's a zlib compressed iTXt chunk
+                if (compressionFlag == 0x00) {
+                    // then it's an uncompressed iTXt chunk
 #ifdef DEBUG
-                std::cout << "Exiv2::PngChunk::parseTXTChunk: We found a zlib compressed iTXt field\n";
+                    std::cout << "Exiv2::PngChunk::parseTXTChunk: We found an uncompressed iTXt field\n";
 #endif
 
-                // the compressed text comes after the translated keyword, but isn't null terminated
-                const byte* compressedText = data.pData_ + keysize + 3 + languageTextSize + 1 + translatedKeyTextSize + 1;
-                long compressedTextSize = data.size_ - (keysize + 3 + languageTextSize + 1 + translatedKeyTextSize + 1);
+                    arr.alloc(textsize);
+                    arr = DataBuf(text, textsize);
+                } else if (compressionFlag == 0x01 && compressionMethod == 0x00) {
+                    // then it's a zlib compressed iTXt chunk
+#ifdef DEBUG
+                    std::cout << "Exiv2::PngChunk::parseTXTChunk: We found a zlib compressed iTXt field\n";
+#endif
 
-                zlibUncompress(compressedText, compressedTextSize, arr);
-            }
-            else
-            {
+                    // the compressed text comes after the translated keyword, but isn't null terminated
+                    zlibUncompress(text, textsize, arr);
+                }
+            } else {
                 // then it isn't zlib compressed and we are sunk
 #ifdef DEBUG
                 std::cerr << "Exiv2::PngChunk::parseTXTChunk: Non-standard iTXt compression method.\n";
 
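Note (illustrative): the offset arithmetic in the iTXt branch is easier to follow with the chunk layout written out. The sketch below restates the layout from the PNG specification and checks, for hypothetical field sizes, the invariant that the new enforce calls establish before text and textsize are formed; it is a reading aid, not code from the PR.

// iTXt chunk payload layout (field names per the PNG specification):
//   keyword              keysize bytes
//   null separator       1 byte
//   compression flag     1 byte
//   compression method   1 byte
//   language tag         languageTextSize bytes, then '\0'
//   translated keyword   translatedKeyTextSize bytes, then '\0'
//   text                 textsize bytes (not null terminated)
#include <cassert>
#include <cstddef>

int main()
{
    // Hypothetical, well-formed sizes for illustration.
    const size_t keysize = 7, languageTextSize = 2, translatedKeyTextSize = 5, textsize = 40;

    const size_t payload    = keysize + 3 + languageTextSize + 1 + translatedKeyTextSize + 1 + textsize;
    const size_t textOffset = keysize + 3 + languageTextSize + 1 + translatedKeyTextSize + 1;

    assert(textOffset <= payload);             // the bound the new enforce checks against data.size_
    assert(payload - textOffset == textsize);  // and then textsize = data.size_ - textOffset
    return 0;
}

The earlier checks in this file (at least keysize + 3 bytes, at least two null separators, and the Safe::add bound covering keysize + 4 + languageTextSize) make each intermediate offset passed to string_from_unterminated valid as well.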
test/data/issue_400_poc1 (new binary file, 372 B, not shown)
test/data/issue_400_poc2 (new binary file, 8.6 KiB, not shown)

tests/bugfixes/github/test_issue_400.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+import system_tests
+
+
+class parseTXTChunkOutOfBoundsRead(metaclass=system_tests.CaseMeta):
+
+    url = "https://github.com/Exiv2/exiv2/issues/400"
+
+    filenames = [
+        system_tests.path("$data_path/issue_400_poc" + str(i)) for i in (1, 2)
+    ]
+
+    commands = ["$exiv2 " + fname for fname in filenames]
+    stdout = [""] * 2
+    stderr = [
+        """$exiv2_exception_message """ + fname + """:
+$kerCorruptedMetadata
+"""
+        for fname in filenames
+    ]
+    retval = [1] * 2