author     Linus Torvalds <torvalds@linux-foundation.org>    2016-05-28 16:15:25 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-05-28 16:15:25 -0700
commit     7e0fb73c52c4037b4d5ef9ff56c7296a3151bd92 (patch)
tree       9ab023505d388563d937b3c3ac26ef3c2045dba2 /fs
parent     4e8440b3b6b801953b2e53c55491cf98fc8f6c01 (diff)
parent     4684fe95300c071983f77653e354c040fe80a265 (diff)
Merge branch 'hash' of git://ftp.sciencehorizons.net/linux
Pull string hash improvements from George Spelvin:
 "This series does several related things:

   - Makes the dcache hash (fs/namei.c) useful for general kernel use.

     (Thanks to Bruce for noticing the zero-length corner case)

   - Converts the string hashes in <linux/sunrpc/svcauth.h> to use the
     above.

   - Avoids 64-bit multiplies in hash_64() on 32-bit platforms.  Two
     32-bit multiplies will do well enough.

   - Rids the world of the bad hash multipliers in hash_32.

     This finishes the job started in commit 689de1d6ca95 ("Minimal
     fix-up of bad hashing behavior of hash_64()")

     The vast majority of Linux architectures have hardware support for
     32x32-bit multiply and so derive no benefit from "simplified"
     multipliers.

     The few processors that do not (68000, h8/300 and some models of
     Microblaze) have arch-specific implementations added.  Those
     patches are last in the series.

   - Overhauls the dcache hash mixing.

     The patch in commit 0fed3ac866ea ("namei: Improve hash mixing if
     CONFIG_DCACHE_WORD_ACCESS") was an off-the-cuff suggestion.
     Replaced with a much more careful design that's simultaneously
     faster and better.  (My own invention, as there was nothing
     suitable in the literature I could find.  Comments welcome!)

   - Modify the hash_name() loop to skip the initial HASH_MIX().  This
     would let us salt the hash if we ever wanted to.

   - Sort out partial_name_hash().

     The hash function is declared as using a long state, even though
     it's truncated to 32 bits at the end and the extra internal state
     contributes nothing to the result.

     And some callers do odd things:

      - fs/hfs/string.c only allocates 32 bits of state
      - fs/hfsplus/unicode.c uses it to hash 16-bit unicode symbols not
        bytes

   - Modify bytemask_from_count to handle inputs of 1..sizeof(long)
     rather than 0..sizeof(long)-1.  This would simplify users other
     than full_name_hash"

  Special thanks to Bruce Fields for testing and finding bugs in v1.  (I
  learned some humbling lessons about "obviously correct" code.)

  On the arch-specific front, the m68k assembly has been tested in a
  standalone test harness, I've been in contact with the Microblaze
  maintainers who mostly don't care, as the hardware multiplier is never
  omitted in real-world applications, and I haven't heard anything from
  the H8/300 world"

* 'hash' of git://ftp.sciencehorizons.net/linux:
  h8300: Add <asm/hash.h>
  microblaze: Add <asm/hash.h>
  m68k: Add <asm/hash.h>
  <linux/hash.h>: Add support for architecture-specific functions
  fs/namei.c: Improve dcache hash function
  Eliminate bad hash multipliers from hash_32() and hash_64()
  Change hash_64() return value to 32 bits
  <linux/sunrpc/svcauth.h>: Define hash_str() in terms of hashlen_string()
  fs/namei.c: Add hashlen_string() function
  Pull out string hash to <linux/stringhash.h>
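
As a quick illustration of the mixing scheme described above, the following
standalone userspace sketch reproduces the 64-bit HASH_MIX/fold_hash pair
from the fs/namei.c patch below.  It is not kernel code: rol64() and the
GOLDEN_RATIO_64 value (taken here as 0x61C8864680B583EB, the constant this
same series settles on for hash_64()) are filled in as assumptions so the
example is self-contained.

/*
 * Standalone sketch (userspace C, 64-bit assumed) of the new mixing scheme.
 * HASH_MIX and fold_hash mirror the fs/namei.c patch below; rol64() and
 * GOLDEN_RATIO_64 are assumed helpers, not copied from a kernel header.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull	/* assumed value */

static inline uint64_t rol64(uint64_t v, unsigned int k)
{
	return (v << k) | (v >> (64 - k));
}

/* Two words of state, no temporaries; rotate constants (12,45) from the patch */
#define HASH_MIX(x, y, a)		\
	( x ^= (a),			\
	  y ^= x, x = rol64(x, 12),	\
	  x += y, y = rol64(y, 45),	\
	  y *= 9 )

static inline uint32_t fold_hash(uint64_t x, uint64_t y)
{
	y ^= x * GOLDEN_RATIO_64;
	y *= GOLDEN_RATIO_64;
	return y >> 32;
}

/* Hash 'len' bytes a word at a time; the zero-padded tail word stands in
 * for the bytemask_from_count() masking done in the real code. */
static uint32_t demo_full_name_hash(const char *name, size_t len)
{
	uint64_t a, x = 0, y = 0;

	while (len >= sizeof(a)) {
		memcpy(&a, name, sizeof(a));
		HASH_MIX(x, y, a);
		name += sizeof(a);
		len -= sizeof(a);
	}
	a = 0;
	memcpy(&a, name, len);
	x ^= a;
	return fold_hash(x, y);
}

int main(void)
{
	printf("%08x\n", (unsigned)demo_full_name_hash("fs/namei.c", 10));
	return 0;
}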
Diffstat (limited to 'fs')
-rw-r--r--  fs/dcache.c |   3
-rw-r--r--  fs/namei.c  | 162
2 files changed, 125 insertions, 40 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index c622872c12c5..ad4a542e9bab 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1670,8 +1670,7 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
struct qstr q;
q.name = name;
- q.len = strlen(name);
- q.hash = full_name_hash(q.name, q.len);
+ q.hash_len = hashlen_string(name);
return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
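
The dcache.c change above folds the separate strlen() + full_name_hash()
pair into a single hashlen_string() call that returns both values packed in
one u64 stored in q.hash_len.  Below is a minimal sketch of that packing;
the bit layout (length in the upper 32 bits, hash in the lower 32) is an
assumption based on the hashlen_create()/hashlen_len()/hashlen_hash()
helpers in the kernel headers.

/* Hedged sketch of the hash_len packing used by q.hash_len above.
 * Layout assumed: length in the high half, hash in the low half. */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t hashlen_create(uint32_t hash, uint32_t len)
{
	return ((uint64_t)len << 32) | hash;
}

static inline uint32_t hashlen_len(uint64_t hashlen)
{
	return (uint32_t)(hashlen >> 32);
}

static inline uint32_t hashlen_hash(uint64_t hashlen)
{
	return (uint32_t)hashlen;
}

int main(void)
{
	uint64_t hl = hashlen_create(0xdeadbeef, 5);	/* e.g. a 5-byte name */

	printf("len=%u hash=%08x\n", hashlen_len(hl), hashlen_hash(hl));
	return 0;
}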
diff --git a/fs/namei.c b/fs/namei.c
index 15b124c18ed8..e7bf99d387d0 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -35,6 +35,7 @@
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
#include <linux/hash.h>
+#include <linux/bitops.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -1797,74 +1798,144 @@ static int walk_component(struct nameidata *nd, int flags)
#include <asm/word-at-a-time.h>
-#ifdef CONFIG_64BIT
+#ifdef HASH_MIX
-static inline unsigned int fold_hash(unsigned long hash)
-{
- return hash_64(hash, 32);
-}
+/* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */
+#elif defined(CONFIG_64BIT)
/*
- * This is George Marsaglia's XORSHIFT generator.
- * It implements a maximum-period LFSR in only a few
- * instructions. It also has the property (required
- * by hash_name()) that mix_hash(0) = 0.
+ * Register pressure in the mixing function is an issue, particularly
+ * on 32-bit x86, but almost any function requires one state value and
+ * one temporary. Instead, use a function designed for two state values
+ * and no temporaries.
+ *
+ * This function cannot create a collision in only two iterations, so
+ * we have two iterations to achieve avalanche. In those two iterations,
+ * we have six layers of mixing, which is enough to spread one bit's
+ * influence out to 2^6 = 64 state bits.
+ *
+ * Rotate constants are scored by considering either 64 one-bit input
+ * deltas or 64*63/2 = 2016 two-bit input deltas, and finding the
+ * probability of that delta causing a change to each of the 128 output
+ * bits, using a sample of random initial states.
+ *
+ * The Shannon entropy of the computed probabilities is then summed
+ * to produce a score. Ideally, any input change has a 50% chance of
+ * toggling any given output bit.
+ *
+ * Mixing scores (in bits) for (12,45):
+ * Input delta: 1-bit 2-bit
+ * 1 round: 713.3 42542.6
+ * 2 rounds: 2753.7 140389.8
+ * 3 rounds: 5954.1 233458.2
+ * 4 rounds: 7862.6 256672.2
+ * Perfect: 8192 258048
+ * (64*128) (64*63/2 * 128)
*/
-static inline unsigned long mix_hash(unsigned long hash)
+#define HASH_MIX(x, y, a) \
+ ( x ^= (a), \
+ y ^= x, x = rol64(x,12),\
+ x += y, y = rol64(y,45),\
+ y *= 9 )
+
+/*
+ * Fold two longs into one 32-bit hash value. This must be fast, but
+ * latency isn't quite as critical, as there is a fair bit of additional
+ * work done before the hash value is used.
+ */
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
- hash ^= hash << 13;
- hash ^= hash >> 7;
- hash ^= hash << 17;
- return hash;
+ y ^= x * GOLDEN_RATIO_64;
+ y *= GOLDEN_RATIO_64;
+ return y >> 32;
}
#else /* 32-bit case */
-#define fold_hash(x) (x)
+/*
+ * Mixing scores (in bits) for (7,20):
+ * Input delta: 1-bit 2-bit
+ * 1 round: 330.3 9201.6
+ * 2 rounds: 1246.4 25475.4
+ * 3 rounds: 1907.1 31295.1
+ * 4 rounds: 2042.3 31718.6
+ * Perfect: 2048 31744
+ * (32*64) (32*31/2 * 64)
+ */
+#define HASH_MIX(x, y, a) \
+ ( x ^= (a), \
+ y ^= x, x = rol32(x, 7),\
+ x += y, y = rol32(y,20),\
+ y *= 9 )
-static inline unsigned long mix_hash(unsigned long hash)
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
- hash ^= hash << 13;
- hash ^= hash >> 17;
- hash ^= hash << 5;
- return hash;
+ /* Use arch-optimized multiply if one exists */
+ return __hash_32(y ^ __hash_32(x));
}
#endif
-unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+/*
+ * Return the hash of a string of known length. This is carefully
+ * designed to match hash_name(), which is the more critical function.
+ * In particular, we must end by hashing a final word containing 0..7
+ * payload bytes, to match the way that hash_name() iterates until it
+ * finds the delimiter after the name.
+ */
+unsigned int full_name_hash(const char *name, unsigned int len)
{
- unsigned long a, hash = 0;
+ unsigned long a, x = 0, y = 0;
for (;;) {
+ if (!len)
+ goto done;
a = load_unaligned_zeropad(name);
if (len < sizeof(unsigned long))
break;
- hash = mix_hash(hash + a);
+ HASH_MIX(x, y, a);
name += sizeof(unsigned long);
len -= sizeof(unsigned long);
- if (!len)
- goto done;
}
- hash += a & bytemask_from_count(len);
+ x ^= a & bytemask_from_count(len);
done:
- return fold_hash(hash);
+ return fold_hash(x, y);
}
EXPORT_SYMBOL(full_name_hash);
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+u64 hashlen_string(const char *name)
+{
+ unsigned long a = 0, x = 0, y = 0, adata, mask, len;
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+
+ len = -sizeof(unsigned long);
+ do {
+ HASH_MIX(x, y, a);
+ len += sizeof(unsigned long);
+ a = load_unaligned_zeropad(name+len);
+ } while (!has_zero(a, &adata, &constants));
+
+ adata = prep_zero_mask(a, adata, &constants);
+ mask = create_zero_mask(adata);
+ x ^= a & zero_bytemask(mask);
+
+ return hashlen_create(fold_hash(x, y), len + find_zero(mask));
+}
+EXPORT_SYMBOL(hashlen_string);
+
/*
* Calculate the length and hash of the path component, and
* return the "hash_len" as the result.
*/
static inline u64 hash_name(const char *name)
{
- unsigned long a, b, adata, bdata, mask, hash, len;
+ unsigned long a = 0, b, x = 0, y = 0, adata, bdata, mask, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- hash = a = 0;
len = -sizeof(unsigned long);
do {
- hash = mix_hash(hash + a);
+ HASH_MIX(x, y, a);
len += sizeof(unsigned long);
a = load_unaligned_zeropad(name+len);
b = a ^ REPEAT_BYTE('/');
@@ -1872,25 +1943,40 @@ static inline u64 hash_name(const char *name)
adata = prep_zero_mask(a, adata, &constants);
bdata = prep_zero_mask(b, bdata, &constants);
-
mask = create_zero_mask(adata | bdata);
+ x ^= a & zero_bytemask(mask);
- hash += a & zero_bytemask(mask);
- len += find_zero(mask);
- return hashlen_create(fold_hash(hash), len);
+ return hashlen_create(fold_hash(x, y), len + find_zero(mask));
}
-#else
+#else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
-unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+/* Return the hash of a string of known length */
+unsigned int full_name_hash(const char *name, unsigned int len)
{
unsigned long hash = init_name_hash();
while (len--)
- hash = partial_name_hash(*name++, hash);
+ hash = partial_name_hash((unsigned char)*name++, hash);
return end_name_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+u64 hashlen_string(const char *name)
+{
+ unsigned long hash = init_name_hash();
+ unsigned long len = 0, c;
+
+ c = (unsigned char)*name;
+ do {
+ len++;
+ hash = partial_name_hash(c, hash);
+ c = (unsigned char)name[len];
+ } while (c);
+ return hashlen_create(end_name_hash(hash), len);
+}
+EXPORT_SYMBOL(hashlen_string);
+
/*
* We know there's a real path component here of at least
* one character.
@@ -1934,7 +2020,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
int type;
err = may_lookup(nd);
- if (err)
+ if (err)
return err;
hash_len = hash_name(name);
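
The word-at-a-time loops in hashlen_string() and hash_name() above never
examine the name byte by byte; they detect the terminating NUL (or '/')
across a whole word via the has_zero()/create_zero_mask()/find_zero()
helpers.  The following is a hedged userspace sketch of the underlying bit
trick, not the kernel's <asm/word-at-a-time.h>; it assumes 64-bit
little-endian and the GCC/Clang __builtin_ctzll().

/*
 * Userspace sketch of word-at-a-time terminator detection.
 * Constants assume 64-bit little-endian words.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONE_BYTES  0x0101010101010101ULL
#define HIGH_BITS  0x8080808080808080ULL

/* Sets the high bit of each zero byte in 'w'; the first zero byte is
 * always reported reliably, which is all the name hash needs. */
static inline uint64_t zero_byte_mask(uint64_t w)
{
	return (w - ONE_BYTES) & ~w & HIGH_BITS;
}

int main(void)
{
	const char name[16] = "namei";	/* remaining bytes are NUL-filled */
	uint64_t w, m;

	memcpy(&w, name, sizeof(w));	/* like load_unaligned_zeropad() */
	m = zero_byte_mask(w);
	/* For a path component, the same test would also be applied to
	 * w ^ 0x2f2f2f2f2f2f2f2f so that '/' terminates the scan too. */
	printf("first NUL at byte %d\n", (int)(__builtin_ctzll(m) >> 3));
	return 0;
}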