Comparing changes

base fork: gray/compress-snappy, base: 350118d139
head fork: gray/compress-snappy, compare: 122affede2
  • 4 commits
  • 10 files changed
  • 0 commit comments
  • 1 contributor
Changes (3 changed lines)
@@ -1,5 +1,8 @@
Revision history for Compress-Snappy
+0.20 Mon Apr 1 04:03:58 UTC 2013
+ - Integrated latest csnappy source for windows fix. (RT #83778)
+
0.19 Sat Sep 8 17:56:50 UTC 2012
- Allowed overloaded objects to be stringified.
MANIFEST (1 changed line)
@@ -8,6 +8,7 @@ ppport.h
README
Snappy.xs
src/csnappy.h
+src/csnappy_compat.h
src/csnappy_compress.c
src/csnappy_decompress.c
src/csnappy_internal.h
README (2 changed lines)
@@ -24,7 +24,7 @@ A C compiler is required to build this module.
COPYRIGHT AND LICENCE
-Copyright (C) 2011-2012 by gray <gray@cpan.org>
+Copyright (C) 2011-2013 by gray <gray@cpan.org>
This library is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
lib/Compress/Snappy.pm (4 changed lines)
@@ -6,7 +6,7 @@ use parent qw(Exporter);
use XSLoader;
-our $VERSION = '0.19';
+our $VERSION = '0.20';
our $XS_VERSION = $VERSION;
$VERSION = eval $VERSION;
@@ -147,7 +147,7 @@ L<http://search.cpan.org/dist/Compress-Snappy/>
=head1 COPYRIGHT AND LICENSE
-Copyright (C) 2011-2012 gray <gray at cpan.org>, all rights reserved.
+Copyright (C) 2011-2013 gray <gray at cpan.org>, all rights reserved.
This library is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
src/csnappy.h (2 changed lines)
@@ -51,7 +51,7 @@ csnappy_compress_fragment(
* REQUIRES: working_memory has (1 << workmem_bytes_power_of_two) bytes.
* REQUIRES: 9 <= workmem_bytes_power_of_two <= 15.
*
- * Takes the data stored in "input[0..input_length]" and stores
+ * Takes the data stored in "input[0..input_length-1]" and stores
* it in the array pointed to by "compressed".
*
* "*out_compressed_length" is set to the length of the compressed output.
src/csnappy_compat.h (16 changed lines, new file)
@@ -0,0 +1,16 @@
+#ifndef CSNAPPY_COMPAT_H
+
+/* This file was added to Sereal to attempt some MSVC compatibility,
+ * but is at best a band-aid. And done without a lot of experience
+ * in whatever subset of C99 MSVC supports.
+ */
+
+#ifndef INLINE
+# if defined(_MSC_VER)
+# define INLINE __inline
+# else
+# define INLINE inline
+# endif
+#endif
+
+#endif
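
The new header exists because MSVC compiles .c files in C89 mode, where inline is not a keyword; the vendor spelling is __inline. A minimal sketch of how the shim is used by the rest of the patch (the function below is made up for illustration):

/* With csnappy_compat.h included, INLINE expands to __inline under _MSC_VER
 * and to plain inline everywhere else, so one spelling builds on both.
 * add_one() is a made-up example, not part of csnappy. */
#include "csnappy_compat.h"

static INLINE int add_one(int x)
{
    return x + 1;
}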
src/csnappy_compress.c (29 changed lines)
@@ -30,6 +30,9 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
File modified for the Linux Kernel by
Zeev Tarantov <zeev.tarantov@gmail.com>
+
+File modified for Sereal by
+Steffen Mueller <smueller@cpan.org>
*/
#include "csnappy_internal.h"
@@ -40,7 +43,7 @@ Zeev Tarantov <zeev.tarantov@gmail.com>
#include "csnappy.h"
-static inline char*
+static INLINE char*
encode_varint32(char *sptr, uint32_t v)
{
uint8_t* ptr = (uint8_t *)sptr;
@@ -222,12 +225,12 @@ csnappy_compress_fragment(
* input. Of course, it doesn't hurt if the hash function is reasonably fast
* either, as it gets called a lot.
*/
-static inline uint32_t HashBytes(uint32_t bytes, int shift)
+static INLINE uint32_t HashBytes(uint32_t bytes, int shift)
{
uint32_t kMul = 0x1e35a7bd;
return (bytes * kMul) >> shift;
}
-static inline uint32_t Hash(const char *p, int shift)
+static INLINE uint32_t Hash(const char *p, int shift)
{
return HashBytes(UNALIGNED_LOAD32(p), shift);
}
@@ -247,7 +250,7 @@ static inline uint32_t Hash(const char *p, int shift)
* x86_64 is little endian.
*/
#if defined(__x86_64__)
-static inline int
+static INLINE int
FindMatchLength(const char *s1, const char *s2, const char *s2_limit)
{
uint64_t x;
@@ -291,7 +294,7 @@ FindMatchLength(const char *s1, const char *s2, const char *s2_limit)
return matched;
}
#else /* !defined(__x86_64__) */
-static inline int
+static INLINE int
FindMatchLength(const char *s1, const char *s2, const char *s2_limit)
{
/* Implementation based on the x86-64 version, above. */
@@ -326,7 +329,7 @@ FindMatchLength(const char *s1, const char *s2, const char *s2_limit)
#endif /* !defined(__x86_64__) */
-static inline char*
+static INLINE char*
EmitLiteral(char *op, const char *literal, int len, int allow_fast_path)
{
int n = len - 1; /* Zero-length literals are disallowed */
@@ -367,7 +370,7 @@ EmitLiteral(char *op, const char *literal, int len, int allow_fast_path)
return op + len;
}
-static inline char*
+static INLINE char*
EmitCopyLessThan64(char *op, int offset, int len)
{
DCHECK_LE(len, 64);
@@ -389,7 +392,7 @@ EmitCopyLessThan64(char *op, int offset, int len)
return op;
}
-static inline char*
+static INLINE char*
EmitCopy(char *op, int offset, int len)
{
/* Emit 64 byte copies but make sure to keep at least four bytes
@@ -420,7 +423,7 @@ empirically found that overlapping loads such as
are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
We have different versions for 64- and 32-bit; ideally we would avoid the
-two functions and just inline the UNALIGNED_LOAD64 call into
+two functions and just INLINE the UNALIGNED_LOAD64 call into
GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
enough to avoid loading the value multiple times then. For 64-bit, the load
is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
@@ -431,11 +434,11 @@ done at GetUint32AtOffset() time.
typedef uint64_t EightBytesReference;
-static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+static INLINE EightBytesReference GetEightBytesAt(const char* ptr) {
return UNALIGNED_LOAD64(ptr);
}
-static inline uint32_t GetUint32AtOffset(uint64_t v, int offset) {
+static INLINE uint32_t GetUint32AtOffset(uint64_t v, int offset) {
DCHECK_GE(offset, 0);
DCHECK_LE(offset, 4);
#ifdef __LITTLE_ENDIAN
@@ -449,11 +452,11 @@ static inline uint32_t GetUint32AtOffset(uint64_t v, int offset) {
typedef const char* EightBytesReference;
-static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+static INLINE EightBytesReference GetEightBytesAt(const char* ptr) {
return ptr;
}
-static inline uint32_t GetUint32AtOffset(const char* v, int offset) {
+static INLINE uint32_t GetUint32AtOffset(const char* v, int offset) {
DCHECK_GE(offset, 0);
DCHECK_LE(offset, 4);
return UNALIGNED_LOAD32(v + offset);
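
Beyond the inline-to-INLINE substitutions, the hunks above expose the compressor's multiply-and-shift hash. A small standalone illustration of what that arithmetic does; the shift value and input word are made up for the demonstration:

/* Illustration only: (bytes * kMul) >> shift keeps the top (32 - shift) bits
 * of the product, so the result always indexes a table of 1 << (32 - shift)
 * slots. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t HashBytes(uint32_t bytes, int shift)
{
    uint32_t kMul = 0x1e35a7bd;
    return (bytes * kMul) >> shift;
}

int main(void)
{
    int shift = 20;                     /* table of 1 << 12 slots */
    uint32_t word = 0x64636261;         /* "abcd" read as a little-endian word */
    uint32_t h = HashBytes(word, shift);
    assert(h < (1u << (32 - shift)));
    printf("hash slot: %u\n", h);
    return 0;
}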
src/csnappy_decompress.c (23 changed lines)
@@ -30,6 +30,9 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
File modified for the Linux Kernel by
Zeev Tarantov <zeev.tarantov@gmail.com>
+
+File modified for Sereal by
+Steffen Mueller <smueller@cpan.org>
*/
#include "csnappy_internal.h"
@@ -194,7 +197,7 @@ static const uint16_t char_table[256] = {
* Note that this does not match the semantics of either memcpy()
* or memmove().
*/
-static inline void IncrementalCopy(const char *src, char *op, int len)
+static INLINE void IncrementalCopy(const char *src, char *op, int len)
{
DCHECK_GT(len, 0);
do {
@@ -235,7 +238,7 @@ static inline void IncrementalCopy(const char *src, char *op, int len)
* position 1. Thus, ten excess bytes.
*/
static const int kMaxIncrementCopyOverflow = 10;
-static inline void IncrementalCopyFastPath(const char *src, char *op, int len)
+static INLINE void IncrementalCopyFastPath(const char *src, char *op, int len)
{
while (op - src < 8) {
UnalignedCopy64(src, op);
@@ -258,7 +261,7 @@ struct SnappyArrayWriter {
char *op_limit;
};
-static inline int
+static INLINE int
SAW__AppendFastPath(struct SnappyArrayWriter *this,
const char *ip, uint32_t len)
{
@@ -268,7 +271,7 @@ SAW__AppendFastPath(struct SnappyArrayWriter *this,
UnalignedCopy64(ip, op);
UnalignedCopy64(ip + 8, op + 8);
} else {
- if (unlikely(space_left < len))
+ if (unlikely(space_left < (int32_t)len))
return CSNAPPY_E_OUTPUT_OVERRUN;
memcpy(op, ip, len);
}
@@ -276,20 +279,20 @@ SAW__AppendFastPath(struct SnappyArrayWriter *this,
return CSNAPPY_E_OK;
}
-static inline int
+static INLINE int
SAW__Append(struct SnappyArrayWriter *this,
const char *ip, uint32_t len)
{
char *op = this->op;
const int space_left = this->op_limit - op;
- if (unlikely(space_left < len))
+ if (unlikely(space_left < (int32_t)len))
return CSNAPPY_E_OUTPUT_OVERRUN;
memcpy(op, ip, len);
this->op = op + len;
return CSNAPPY_E_OK;
}
-static inline int
+static INLINE int
SAW__AppendFromSelf(struct SnappyArrayWriter *this,
uint32_t offset, uint32_t len)
{
@@ -302,10 +305,10 @@ SAW__AppendFromSelf(struct SnappyArrayWriter *this,
if (len <= 16 && offset >= 8 && space_left >= 16) {
UnalignedCopy64(op - offset, op);
UnalignedCopy64(op - offset + 8, op + 8);
- } else if (space_left >= len + kMaxIncrementCopyOverflow) {
+ } else if (space_left >= (int32_t)(len + kMaxIncrementCopyOverflow)) {
IncrementalCopyFastPath(op - offset, op, len);
} else {
- if (space_left < len)
+ if (space_left < (int32_t)len)
return CSNAPPY_E_OUTPUT_OVERRUN;
IncrementalCopy(op - offset, op, len);
}
@@ -368,7 +371,7 @@ csnappy_decompress_noheader(
src += extra_bytes;
available = end_minus5 + 5 - src;
}
- if (unlikely(available < length))
+ if (unlikely(available < (int32_t)length))
return CSNAPPY_E_DATA_MALFORMED;
ret = SAW__Append(&writer, src, length);
if (ret < 0)
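
The (int32_t) casts most likely silence signed/unsigned comparison warnings from MSVC (and gcc's -Wsign-compare): space_left is a signed int while len is uint32_t. A minimal illustration of the integer promotion involved, with made-up values:

/* Not csnappy code: shows why comparing a signed int against a uint32_t
 * directly is risky. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int space_left = -1;    /* pretend the output cursor already overran */
    uint32_t len = 4;

    /* Without a cast, space_left is converted to unsigned (a huge value),
     * so this prints "no" and the overrun check would be skipped. */
    printf("unsigned compare detects overrun: %s\n",
           (space_left < len) ? "yes" : "no");

    /* With the cast used in the patch, the comparison stays signed. */
    printf("signed compare detects overrun:   %s\n",
           (space_left < (int32_t)len) ? "yes" : "no");
    return 0;
}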
src/csnappy_internal.h (15 changed lines)
@@ -31,11 +31,16 @@ Various stubs for the open-source version of Snappy.
File modified for the Linux Kernel by
Zeev Tarantov <zeev.tarantov@gmail.com>
+
+File modified for Sereal by
+Steffen Mueller <smueller@cpan.org>
*/
#ifndef CSNAPPY_INTERNAL_H_
#define CSNAPPY_INTERNAL_H_
+#include "csnappy_compat.h"
+
#ifndef __KERNEL__
#include "csnappy_internal_userspace.h"
#include <string.h>
@@ -77,11 +82,15 @@ Zeev Tarantov <zeev.tarantov@gmail.com>
#endif /* __KERNEL__ */
+#if (!defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)) || ! defined(__BYTE_ORDER)
+# error either __LITTLE_ENDIAN or __BIG_ENDIAN, plus __BYTE_ORDER must be defined
+#endif
+
#define ARCH_ARM_HAVE_UNALIGNED \
defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) || defined(__ARMV6__) || \
defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__)
-static inline void UnalignedCopy64(const void *src, void *dst) {
+static INLINE void UnalignedCopy64(const void *src, void *dst) {
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || ARCH_ARM_HAVE_UNALIGNED
if ((sizeof(void *) == 8) || (sizeof(long) == 8)) {
UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
@@ -110,7 +119,7 @@ static inline void UnalignedCopy64(const void *src, void *dst) {
#if defined(__arm__)
#if ARCH_ARM_HAVE_UNALIGNED
- static inline uint32_t get_unaligned_le(const void *p, uint32_t n)
+ static INLINE uint32_t get_unaligned_le(const void *p, uint32_t n)
{
uint32_t wordmask = (1U << (8 * n)) - 1;
return get_unaligned_le32(p) & wordmask;
@@ -120,7 +129,7 @@ static inline void UnalignedCopy64(const void *src, void *dst) {
#define get_unaligned_le get_unaligned_le_armv5
#endif
#else
- static inline uint32_t get_unaligned_le(const void *p, uint32_t n)
+ static INLINE uint32_t get_unaligned_le(const void *p, uint32_t n)
{
/* Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits */
static const uint32_t wordmask[] = {
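
The new #error guard turns a missing byte-order definition into a build failure instead of letting the endian-dependent branches silently pick the wrong path. What it expects to be visible by this point, following the glibc <endian.h> convention (the MSVC and MinGW branches added to csnappy_internal_userspace.h below supply equivalents by hand):

/* The convention the guard checks for, sketched for a little-endian target. */
#define __LITTLE_ENDIAN 1234
#define __BIG_ENDIAN    4321
#define __BYTE_ORDER    __LITTLE_ENDIAN   /* or __BIG_ENDIAN */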
src/csnappy_internal_userspace.h (72 changed lines)
@@ -31,16 +31,43 @@ Various stubs for the open-source version of Snappy.
File modified by
Zeev Tarantov <zeev.tarantov@gmail.com>
+
+File modified for Sereal by
+Steffen Mueller <smueller@cpan.org>
*/
#ifndef CSNAPPY_INTERNAL_USERSPACE_H_
#define CSNAPPY_INTERNAL_USERSPACE_H_
-#if defined(_MSC_VER) && (_MSC_VER <= 1300)
+/*note the original version of this file checked for MS version, but MS will *never* support
+ * anything but C89, so the version check is bogus. */
+#if defined(_MSC_VER)
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
+typedef __int32 int32_t; /* Sereal specific change, see csnappy_decompress.c(271) : error C2065: 'int32_t' : undeclared identifier */
+/* the following define is Sereal specific, as MS C89 compilers do not know about "inline" */
+#define inline __inline
+#ifdef _M_X64
+# define __x86_64__
+# define __x86_64
+# define __amd64__
+# define __amd64
+#endif
+#ifdef _M_IX86
+# define __i386__
+# define __i386
+# define i386
+# define _X86_
+#endif
+#ifdef _M_IA64
+# define __ia64__
+# define __ia64
+# define __IA64__
+# define __itanium__
+#endif
+
#else
#include <stdint.h>
#endif
@@ -70,6 +97,8 @@ typedef unsigned __int64 uint64_t;
#define DCHECK(cond)
#endif
+#include "csnappy_compat.h"
+
/*
Uses code from http://code.google.com/p/exfat/source/browse/trunk/libexfat/byteorder.h
with 3-clause BSD license instead of GPL, with permission from:
@@ -82,6 +111,9 @@ Albert Lee
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)
+#define __BIG_ENDIAN 4321
+#define __LITTLE_ENDIAN 1234
+#define __BYTE_ORDER LITTLE_ENDIAN
#elif defined(__GLIBC__) || defined(__ANDROID__) || defined(__CYGWIN__)
@@ -133,6 +165,12 @@ Albert Lee
#define __BYTE_ORDER __BIG_ENDIAN
#endif
+#elif defined(__MINGW32__)
+#include <sys/param.h>
+#define __BYTE_ORDER BYTE_ORDER
+#define __LITTLE_ENDIAN LITTLE_ENDIAN
+#define __BIG_ENDIAN BIG_ENDIAN
+
#endif
@@ -169,13 +207,13 @@ Albert Lee
struct una_u64 { uint64_t x; };
#pragma pack()
-static inline uint64_t UNALIGNED_LOAD64(const void *p)
+static INLINE uint64_t UNALIGNED_LOAD64(const void *p)
{
const struct una_u64 *ptr = (const struct una_u64 *)p;
return ptr->x;
}
-static inline void UNALIGNED_STORE64(void *p, uint64_t v)
+static INLINE void UNALIGNED_STORE64(void *p, uint64_t v)
{
struct una_u64 *ptr = (struct una_u64 *)p;
ptr->x = v;
@@ -189,37 +227,37 @@ struct una_u32 { uint32_t x; };
struct una_u64 { uint64_t x; };
#pragma pack()
-static inline uint16_t UNALIGNED_LOAD16(const void *p)
+static INLINE uint16_t UNALIGNED_LOAD16(const void *p)
{
const struct una_u16 *ptr = (const struct una_u16 *)p;
return ptr->x;
}
-static inline uint32_t UNALIGNED_LOAD32(const void *p)
+static INLINE uint32_t UNALIGNED_LOAD32(const void *p)
{
const struct una_u32 *ptr = (const struct una_u32 *)p;
return ptr->x;
}
-static inline uint64_t UNALIGNED_LOAD64(const void *p)
+static INLINE uint64_t UNALIGNED_LOAD64(const void *p)
{
const struct una_u64 *ptr = (const struct una_u64 *)p;
return ptr->x;
}
-static inline void UNALIGNED_STORE16(void *p, uint16_t v)
+static INLINE void UNALIGNED_STORE16(void *p, uint16_t v)
{
struct una_u16 *ptr = (struct una_u16 *)p;
ptr->x = v;
}
-static inline void UNALIGNED_STORE32(void *p, uint32_t v)
+static INLINE void UNALIGNED_STORE32(void *p, uint32_t v)
{
struct una_u32 *ptr = (struct una_u32 *)p;
ptr->x = v;
}
-static inline void UNALIGNED_STORE64(void *p, uint64_t v)
+static INLINE void UNALIGNED_STORE64(void *p, uint64_t v)
{
struct una_u64 *ptr = (struct una_u64 *)p;
ptr->x = v;
@@ -232,21 +270,21 @@ static inline void UNALIGNED_STORE64(void *p, uint64_t v)
#define get_unaligned_le32(p) UNALIGNED_LOAD32(p)
#define put_unaligned_le16(v, p) UNALIGNED_STORE16(p, v)
#elif __BYTE_ORDER == __BIG_ENDIAN
-static inline uint32_t get_unaligned_le32(const void *p)
+static INLINE uint32_t get_unaligned_le32(const void *p)
{
return bswap_32(UNALIGNED_LOAD32(p));
}
-static inline void put_unaligned_le16(uint16_t val, void *p)
+static INLINE void put_unaligned_le16(uint16_t val, void *p)
{
UNALIGNED_STORE16(p, bswap_16(val));
}
#else
-static inline uint32_t get_unaligned_le32(const void *p)
+static INLINE uint32_t get_unaligned_le32(const void *p)
{
const uint8_t *b = (const uint8_t *)p;
return b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24);
}
-static inline void put_unaligned_le16(uint16_t val, void *p)
+static INLINE void put_unaligned_le16(uint16_t val, void *p)
{
uint8_t *b = (uint8_t *)p;
b[0] = val & 255;
@@ -257,19 +295,19 @@ static inline void put_unaligned_le16(uint16_t val, void *p)
#if defined(HAVE_BUILTIN_CTZ)
-static inline int FindLSBSetNonZero(uint32_t n)
+static INLINE int FindLSBSetNonZero(uint32_t n)
{
return __builtin_ctz(n);
}
-static inline int FindLSBSetNonZero64(uint64_t n)
+static INLINE int FindLSBSetNonZero64(uint64_t n)
{
return __builtin_ctzll(n);
}
#else /* Portable versions. */
-static inline int FindLSBSetNonZero(uint32_t n)
+static INLINE int FindLSBSetNonZero(uint32_t n)
{
int rc = 31, i, shift;
uint32_t x;
@@ -285,7 +323,7 @@ static inline int FindLSBSetNonZero(uint32_t n)
}
/* FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero(). */
-static inline int FindLSBSetNonZero64(uint64_t n)
+static INLINE int FindLSBSetNonZero64(uint64_t n)
{
const uint32_t bottombits = (uint32_t)n;
if (bottombits == 0) {
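
Most of this file's diff adds MSVC typedefs, architecture macro aliases, and byte-order definitions, and applies the same inline-to-INLINE substitution to the packed-struct unaligned loads and stores and to the bit-scan fallbacks. A hypothetical sanity check for one of those helpers, reading a little-endian 32-bit value from an odd (unaligned) address; the buffer contents are made up and the header is assumed to be includable on its own in userspace:

/* Hypothetical check: get_unaligned_le32() should yield the same value on
 * little- and big-endian hosts, even from a misaligned pointer. */
#include <stdint.h>
#include <stdio.h>
#include "csnappy_internal_userspace.h"

int main(void)
{
    const uint8_t buf[5] = { 0xFF, 0x78, 0x56, 0x34, 0x12 };
    uint32_t v = get_unaligned_le32(buf + 1);   /* buf + 1 is unaligned */
    printf("%s\n", v == 0x12345678 ? "ok" : "FAIL");
    return 0;
}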
