[release/6.0-staging] Zlib: Update zlib to v1.2.13, intel-zlib to v1.2.13_jtk; add memory protections (#93634)

* Update zlib and zlib-intel to 1.2.13

* Add missing cgmanifest.json registrations

* Add defensive zeroing zlib allocator

* Put zlib-version files under the correct directory

* Add missing INTERNAL_ZLIB_INTEL definition
GrabYourPitchforks committed Oct 18, 2023
1 parent b67631d commit 3ae53c0
Showing 84 changed files with 33,734 additions and 3,656 deletions.
6 changes: 3 additions & 3 deletions THIRD-PARTY-NOTICES.TXT
@@ -70,12 +70,12 @@ License notice for Zlib
-----------------------

https://github.com/madler/zlib
-http://zlib.net/zlib_license.html
+https://zlib.net/zlib_license.html

/* zlib.h -- interface of the 'zlib' general purpose compression library
-  version 1.2.11, January 15th, 2017
+  version 1.2.13, October 13th, 2022

-  Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
+  Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
9 changes: 4 additions & 5 deletions src/libraries/Native/AnyOS/zlib/pal_zlib.c
@@ -9,7 +9,9 @@
#define c_static_assert(e) static_assert((e),"")
#endif

-#if defined(_WIN32) || defined(__EMSCRIPTEN__)
+#ifdef INTERNAL_ZLIB_INTEL
+#include "../../Windows/System.IO.Compression.Native/zlib-intel/zlib.h"
+#elif defined(_WIN32) || defined(__EMSCRIPTEN__)
#include "../../Windows/System.IO.Compression.Native/zlib/zlib.h"
#else
#include "pal_utilities.h"
@@ -40,14 +42,11 @@ Initializes the PAL_ZStream by creating and setting its underlying z_stream.
*/
static int32_t Init(PAL_ZStream* stream)
{
-    z_stream* zStream = (z_stream*)malloc(sizeof(z_stream));
+    z_stream* zStream = (z_stream*)calloc(1, sizeof(z_stream));
stream->internalState = zStream;

if (zStream != NULL)
{
-        zStream->zalloc = Z_NULL;
-        zStream->zfree = Z_NULL;
-        zStream->opaque = Z_NULL;
return PAL_Z_OK;
}
else
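The calloc swap above is what lets Init drop the three explicit field assignments: Z_NULL is defined as 0 in zlib.h, so a zero-filled z_stream already has zalloc, zfree, and opaque cleared, and every remaining field (next_in, avail_in, internal state pointers) starts from a known value rather than heap garbage. A minimal sketch of the pattern, using a hypothetical helper name and assuming only zlib.h:

#include <stdlib.h>
#include <zlib.h>

/* Hypothetical helper (not from the commit): zero-initializing the whole
   struct subsumes the old field-by-field Z_NULL setup. */
static z_stream* create_zeroed_zstream(void)
{
    z_stream* zs = (z_stream*)calloc(1, sizeof(z_stream));
    /* No zs->zalloc = Z_NULL; etc. needed -- calloc already cleared
       every byte, and Z_NULL is 0. */
    return zs; /* NULL on allocation failure, same as the malloc version */
}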
201 changes: 201 additions & 0 deletions src/libraries/Native/AnyOS/zlib/zlib_allocator_unix.c
@@ -0,0 +1,201 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include <stdbool.h>
#include <stdint.h>
#include "../../Windows/System.IO.Compression.Native/zlib/zutil.h"

/* A custom allocator for zlib that provides some defense-in-depth over standard malloc / free.
* (non-Windows version)
*
* 1. When zlib allocates fixed-length data structures for containing stream metadata, we zero
* the memory before using it, preventing use of uninitialized memory within these structures.
* Ideally we would do this for dynamically-sized buffers as well, but there is a measurable
* perf impact to doing this. Zeroing fixed structures seems like a good trade-off here, since
* these data structures contain most of the metadata used for managing the variable-length
* dynamically allocated buffers.
*
* 2. We put a cookie both before and after any allocated memory, which allows us to detect local
* buffer overruns on the call to free(). The cookie values are tied to the addresses where
* the data is located in memory.
*
* 3. We trash the aforementioned cookie on free(), which allows us to detect double-free.
*
* If any of these checks fails, the application raises SIGABRT.
*/
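
/* Resulting allocation layout (illustrative):
 *
 *   [ header cookie + padding ][ cbRequested user bytes ][ trailer cookie ]
 *   ^ pAlloced                 ^ pReturnToCaller         ^ pTrailerCookie
 *
 * The header cookie starts the raw allocation and is padded out to
 * MEMORY_ALLOCATION_ALIGNMENT so the pointer handed back to zlib stays
 * aligned; the trailer cookie is written unaligned, immediately after the
 * caller's bytes.
 */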

static bool IsMitigationDisabled()
{
enum _MitigationEnablementTristate
{
MITIGATION_NOT_YET_QUERIED = 0,
MITIGATION_DISABLED = 1,
MITIGATION_ENABLED = 2 // really, anything other than 0 or 1
};
static int s_fMitigationEnablementState = MITIGATION_NOT_YET_QUERIED;

// If already initialized, return immediately.
// We don't need a volatile read here since the publish is performed with release semantics.
if (s_fMitigationEnablementState != MITIGATION_NOT_YET_QUERIED)
{
return (s_fMitigationEnablementState == MITIGATION_DISABLED);
}

// Initialize the tri-state now.
// It's ok for multiple threads to do this simultaneously. Only one thread will win.
// Valid env var values to disable mitigation: "true" and "1"
// All other env var values (or error) leaves mitigation enabled.

char* pchEnvVar = getenv("DOTNET_SYSTEM_IO_COMPRESSION_DISABLEZLIBMITIGATIONS");
bool fMitigationDisabled = (pchEnvVar && (strcmp(pchEnvVar, "1") == 0 || strcmp(pchEnvVar, "true") == 0));

// We really don't care about the return value of the ICE operation. If another thread
// beat us to it, so be it. The recursive call will figure it out.
__sync_val_compare_and_swap(
/* destination: */ &s_fMitigationEnablementState,
/* comparand: */ MITIGATION_NOT_YET_QUERIED,
/* exchange: */ fMitigationDisabled ? MITIGATION_DISABLED : MITIGATION_ENABLED);
return IsMitigationDisabled();
}

#ifndef MEMORY_ALLOCATION_ALIGNMENT
// malloc() returns an address suitably aligned for any built-in data type.
// Historically, this has been twice the arch's natural word size.
#ifdef HOST_64BIT
#define MEMORY_ALLOCATION_ALIGNMENT 16
#else
#define MEMORY_ALLOCATION_ALIGNMENT 8
#endif
#endif

typedef struct _DOTNET_ALLOC_COOKIE
{
void* Address;
size_t Size;
} DOTNET_ALLOC_COOKIE;

static bool SafeAdd(size_t a, size_t b, size_t* sum)
{
if (SIZE_MAX - a >= b) { *sum = a + b; return true; }
else { *sum = 0; return false; }
}

static bool SafeMult(size_t a, size_t b, size_t* product)
{
if (a == 0) { *product = 0; return true; } // 0 * b can't overflow; also avoids dividing by zero below
if (SIZE_MAX / a >= b) { *product = a * b; return true; }
else { *product = 0; return false; }
}

static DOTNET_ALLOC_COOKIE ReadAllocCookieUnaligned(const void* pSrc)
{
DOTNET_ALLOC_COOKIE vCookie;
memcpy(&vCookie, pSrc, sizeof(DOTNET_ALLOC_COOKIE));
return vCookie;
}

static void WriteAllocCookieUnaligned(void* pDest, DOTNET_ALLOC_COOKIE vCookie)
{
memcpy(pDest, &vCookie, sizeof(DOTNET_ALLOC_COOKIE));
}
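
/* The unaligned helpers above exist because the trailer cookie lives at
 * pReturnToCaller + cbRequested, an address with no particular alignment;
 * reading or writing a DOTNET_ALLOC_COOKIE there directly would be undefined
 * behavior on strict-alignment targets, so memcpy is used instead. */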

// Historically, the memory allocator always returns addresses aligned to some
// particular boundary. We'll make that same guarantee here just in case somebody
// depends on it.
const size_t DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING = (sizeof(DOTNET_ALLOC_COOKIE) + MEMORY_ALLOCATION_ALIGNMENT - 1) & ~((size_t)MEMORY_ALLOCATION_ALIGNMENT - 1);
const size_t DOTNET_ALLOC_TRAILER_COOKIE_SIZE = sizeof(DOTNET_ALLOC_COOKIE);
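
/* Worked example (typical 64-bit platform): sizeof(DOTNET_ALLOC_COOKIE) == 16
 * and MEMORY_ALLOCATION_ALIGNMENT == 16, so the padded header size is
 * (16 + 16 - 1) & ~15 == 16; a request for N bytes allocates N + 32 in total. */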

voidpf ZLIB_INTERNAL zcalloc(opaque, items, size)
voidpf opaque;
unsigned items;
unsigned size;
{
(void)opaque; // unreferenced formal parameter

if (IsMitigationDisabled())
{
// fallback logic copied from zutil.c
return sizeof(uInt) > 2 ? (voidpf)malloc(items * size) :
(voidpf)calloc(items, size);
}

// If initializing a fixed-size structure, zero the memory.
bool fZeroMemory = (items == 1);

size_t cbRequested;
if (sizeof(items) + sizeof(size) <= sizeof(cbRequested))
{
// multiplication can't overflow; no need for safeint
cbRequested = (size_t)items * (size_t)size;
}
else
{
// multiplication can overflow; go through safeint
if (!SafeMult((size_t)items, (size_t)size, &cbRequested)) { return NULL; }
}

// Make sure the actual allocation has enough room for our frontside & backside cookies.
size_t cbActualAllocationSize;
if (!SafeAdd(cbRequested, DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING + DOTNET_ALLOC_TRAILER_COOKIE_SIZE, &cbActualAllocationSize)) { return NULL; }

void* pAlloced = (fZeroMemory) ? calloc(1, cbActualAllocationSize) : malloc(cbActualAllocationSize);
if (pAlloced == NULL) { return NULL; } // OOM

DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)pAlloced;
uint8_t* pReturnToCaller = (uint8_t*)pAlloced + DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING;
uint8_t* pTrailerCookie = pReturnToCaller + cbRequested;

// Write out the same cookie for the header & the trailer, then we're done.

DOTNET_ALLOC_COOKIE vCookie = { 0 };
vCookie.Address = pReturnToCaller;
vCookie.Size = cbRequested;
*pHeaderCookie = vCookie; // aligned
WriteAllocCookieUnaligned(pTrailerCookie, vCookie);

return pReturnToCaller;
}

static void zcfree_trash_cookie(void* pCookie)
{
memset(pCookie, 0, sizeof(DOTNET_ALLOC_COOKIE));
}

void ZLIB_INTERNAL zcfree(opaque, ptr)
voidpf opaque;
voidpf ptr;
{
(void)opaque; // unreferenced formal parameter

if (IsMitigationDisabled())
{
// fallback logic copied from zutil.c
free(ptr);
return;
}

if (ptr == NULL) { return; } // ok to free nullptr

// Check cookie at beginning

DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)((uint8_t*)ptr - DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING);
if (pHeaderCookie->Address != ptr) { goto Fail; }
size_t cbRequested = pHeaderCookie->Size;

// Check cookie at end

uint8_t* pTrailerCookie = (uint8_t*)ptr + cbRequested;
DOTNET_ALLOC_COOKIE vTrailerCookie = ReadAllocCookieUnaligned(pTrailerCookie);
if (vTrailerCookie.Address != ptr) { goto Fail; }
if (vTrailerCookie.Size != cbRequested) { goto Fail; }

// Checks passed - now trash the cookies and free memory

zcfree_trash_cookie(pHeaderCookie);
zcfree_trash_cookie(pTrailerCookie);

free(pHeaderCookie);
return;

Fail:
abort(); // cookie check failed
}
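
To see the checks fire, here is a hypothetical standalone harness (not part of the commit); it assumes the zcalloc/zcfree entry points above are reachable, ignoring that ZLIB_INTERNAL would normally hide them from other binaries:

#include <stddef.h>
#include <string.h>

extern void* zcalloc(void* opaque, unsigned items, unsigned size);
extern void  zcfree(void* opaque, void* ptr);

int main(void)
{
    /* items == 1, so the 64 bytes come back zero-filled */
    unsigned char* p = zcalloc(NULL, 1, 64);
    if (p == NULL) { return 1; }
    memset(p, 0xAB, 64);   /* in-bounds writes leave both cookies intact */
    zcfree(NULL, p);       /* cookies verify; memory is freed normally */

    p = zcalloc(NULL, 1, 64);
    if (p == NULL) { return 1; }
    p[64] = 0xFF;          /* one-byte overrun clobbers the trailer cookie */
    zcfree(NULL, p);       /* Address check fails -> abort() (SIGABRT) */
    return 0;
}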