diff options
-rw-r--r-- | lib/scudo/scudo_allocator.cpp | 16
-rw-r--r-- | lib/scudo/scudo_tls.h | 2
-rw-r--r-- | lib/scudo/scudo_utils.cpp | 36
-rw-r--r-- | lib/scudo/scudo_utils.h | 57
4 files changed, 55 insertions, 56 deletions
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp index 00fa19218..ec9132f90 100644 --- a/lib/scudo/scudo_allocator.cpp +++ b/lib/scudo/scudo_allocator.cpp @@ -264,7 +264,7 @@ ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) { ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder); } -Xorshift128Plus *getPrng(ScudoThreadContext *ThreadContext) { +ScudoPrng *getPrng(ScudoThreadContext *ThreadContext) { return &ThreadContext->Prng; } @@ -283,7 +283,7 @@ struct ScudoAllocator { StaticSpinMutex FallbackMutex; AllocatorCache FallbackAllocatorCache; ScudoQuarantineCache FallbackQuarantineCache; - Xorshift128Plus FallbackPrng; + ScudoPrng FallbackPrng; bool DeallocationTypeMismatch; bool ZeroContents; @@ -333,8 +333,8 @@ struct ScudoAllocator { static_cast<uptr>(Options.QuarantineSizeMb) << 20, static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10); BackendAllocator.InitCache(&FallbackAllocatorCache); - FallbackPrng.initFromURandom(); - Cookie = FallbackPrng.getNext(); + FallbackPrng.init(); + Cookie = FallbackPrng.getU64(); } // Helper function that checks for a valid Scudo chunk. nullptr isn't. @@ -373,19 +373,19 @@ struct ScudoAllocator { bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment); void *Ptr; - uptr Salt; + u8 Salt; uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize; uptr AllocationAlignment = FromPrimary ? 
MinAlignment : Alignment; ScudoThreadContext *ThreadContext = getThreadContextAndLock(); if (LIKELY(ThreadContext)) { - Salt = getPrng(ThreadContext)->getNext(); + Salt = getPrng(ThreadContext)->getU8(); Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext), AllocationSize, AllocationAlignment, FromPrimary); ThreadContext->unlock(); } else { SpinMutexLock l(&FallbackMutex); - Salt = FallbackPrng.getNext(); + Salt = FallbackPrng.getU8(); Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize, AllocationAlignment, FromPrimary); } @@ -612,7 +612,7 @@ static void initScudoInternal(const AllocatorOptions &Options) { void ScudoThreadContext::init() { getBackendAllocator().InitCache(&Cache); - Prng.initFromURandom(); + Prng.init(); memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder)); } diff --git a/lib/scudo/scudo_tls.h b/lib/scudo/scudo_tls.h index f6039bebe..20c49204c 100644 --- a/lib/scudo/scudo_tls.h +++ b/lib/scudo/scudo_tls.h @@ -30,7 +30,7 @@ namespace __scudo { struct ALIGNED(64) ScudoThreadContext : public ScudoThreadContextPlatform { AllocatorCache Cache; - Xorshift128Plus Prng; + ScudoPrng Prng; uptr QuarantineCachePlaceHolder[4]; void init(); void commitBack(); diff --git a/lib/scudo/scudo_utils.cpp b/lib/scudo/scudo_utils.cpp index 31c391946..f7903ff34 100644 --- a/lib/scudo/scudo_utils.cpp +++ b/lib/scudo/scudo_utils.cpp @@ -123,40 +123,4 @@ bool testCPUFeature(CPUFeature Feature) { } #endif // defined(__x86_64__) || defined(__i386__) -// readRetry will attempt to read Count bytes from the Fd specified, and if -// interrupted will retry to read additional bytes to reach Count. 
-static ssize_t readRetry(int Fd, u8 *Buffer, size_t Count) { - ssize_t AmountRead = 0; - while (static_cast<size_t>(AmountRead) < Count) { - ssize_t Result = read(Fd, Buffer + AmountRead, Count - AmountRead); - if (Result > 0) - AmountRead += Result; - else if (!Result) - break; - else if (errno != EINTR) { - AmountRead = -1; - break; - } - } - return AmountRead; -} - -static void fillRandom(u8 *Data, ssize_t Size) { - int Fd = open("/dev/urandom", O_RDONLY); - if (Fd < 0) { - dieWithMessage("ERROR: failed to open /dev/urandom.\n"); - } - bool Success = readRetry(Fd, Data, Size) == Size; - close(Fd); - if (!Success) { - dieWithMessage("ERROR: failed to read enough data from /dev/urandom.\n"); - } -} - -// Seeds the xorshift state with /dev/urandom. -// TODO(kostyak): investigate using getrandom() if available. -void Xorshift128Plus::initFromURandom() { - fillRandom(reinterpret_cast<u8 *>(State), sizeof(State)); -} - } // namespace __scudo diff --git a/lib/scudo/scudo_utils.h b/lib/scudo/scudo_utils.h index 7198476f4..6c6c9d893 100644 --- a/lib/scudo/scudo_utils.h +++ b/lib/scudo/scudo_utils.h @@ -36,23 +36,58 @@ enum CPUFeature { }; bool testCPUFeature(CPUFeature feature); -// Tiny PRNG based on https://en.wikipedia.org/wiki/Xorshift#xorshift.2B -// The state (128 bits) will be stored in thread local storage. -struct Xorshift128Plus { +INLINE u64 rotl(const u64 X, int K) { + return (X << K) | (X >> (64 - K)); +} + +// XoRoShiRo128+ PRNG (http://xoroshiro.di.unimi.it/). +struct XoRoShiRo128Plus { public: - void initFromURandom(); - u64 getNext() { - u64 x = State[0]; - const u64 y = State[1]; - State[0] = y; - x ^= x << 23; - State[1] = x ^ y ^ (x >> 17) ^ (y >> 26); - return State[1] + y; + void init() { + if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(State), sizeof(State)))) { + // Early processes (eg: init) do not have /dev/urandom yet, but we still + // have to provide them with some degree of entropy. 
Not having a secure + // seed is not as problematic for them, as they are less likely to be + // the target of heap based vulnerabilities exploitation attempts. + State[0] = NanoTime(); + State[1] = 0; + } + fillCache(); } + u8 getU8() { + if (UNLIKELY(isCacheEmpty())) + fillCache(); + const u8 Result = static_cast<u8>(CachedBytes & 0xff); + CachedBytes >>= 8; + CachedBytesAvailable--; + return Result; + } + u64 getU64() { return next(); } + private: + u8 CachedBytesAvailable; + u64 CachedBytes; u64 State[2]; + u64 next() { + const u64 S0 = State[0]; + u64 S1 = State[1]; + const u64 Result = S0 + S1; + S1 ^= S0; + State[0] = rotl(S0, 55) ^ S1 ^ (S1 << 14); + State[1] = rotl(S1, 36); + return Result; + } + bool isCacheEmpty() { + return CachedBytesAvailable == 0; + } + void fillCache() { + CachedBytes = next(); + CachedBytesAvailable = sizeof(CachedBytes); + } }; +typedef XoRoShiRo128Plus ScudoPrng; + } // namespace __scudo #endif // SCUDO_UTILS_H_ |