* [PATCH v2 01/10] siphash: Make siphash functions consistently return 64-bit results
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
@ 2023-09-28 1:20 ` David Gibson
2023-09-28 1:20 ` [PATCH v2 02/10] siphash: Make sip round calculations an inline function rather than macro David Gibson
` (9 subsequent siblings)
10 siblings, 0 replies; 12+ messages in thread
From: David Gibson @ 2023-09-28 1:20 UTC (permalink / raw)
To: Stefano Brivio, passt-dev; +Cc: David Gibson
Some of the siphash_*b() functions return 64-bit results, others 32-bit
results, with no obvious pattern. siphash_32b() also appears to do this
incorrectly - taking the 64-bit hash value and simply returning it
truncated, rather than folding the two halves together.
Since SipHash proper is defined to give a 64-bit hash, make all of them
return 64-bit results. In the one caller which needs a 32-bit value,
tcp_seq_init(), do the fold down to 32 bits ourselves.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
siphash.c | 17 +++++++----------
siphash.h | 6 +++---
tcp.c | 7 ++++---
3 files changed, 14 insertions(+), 16 deletions(-)
diff --git a/siphash.c b/siphash.c
index e266e15..20009fe 100644
--- a/siphash.c
+++ b/siphash.c
@@ -61,7 +61,6 @@
uint64_t v[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL, \
0x6c7967656e657261ULL, 0x7465646279746573ULL }; \
uint64_t b = (uint64_t)(len) << 56; \
- uint32_t ret; \
int __i; \
\
do { \
@@ -93,8 +92,6 @@
v[2] ^= 0xff; \
SIPROUND(4); \
b = (v[0] ^ v[1]) ^ (v[2] ^ v[3]); \
- ret = (uint32_t)(b >> 32) ^ (uint32_t)b; \
- (void)ret; \
} while (0)
/**
@@ -132,12 +129,12 @@ uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
* @in: Input data (two addresses, two ports)
* @k: Hash function key, 128 bits
*
- * Return: 32 bits obtained by XORing the two halves of the 64-bit hash output
+ * Return: the 64-bit hash output
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
/* cppcheck-suppress unusedFunction */
-uint32_t siphash_12b(const uint8_t *in, const uint64_t *k)
+uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
uint64_t combined;
@@ -151,7 +148,7 @@ uint32_t siphash_12b(const uint8_t *in, const uint64_t *k)
b |= *(in32 + 2);
POSTAMBLE;
- return ret;
+ return b;
}
/**
@@ -194,7 +191,7 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
/* cppcheck-suppress unusedFunction */
-uint32_t siphash_32b(const uint8_t *in, const uint64_t *k)
+uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
{
uint64_t *in64 = (uint64_t *)in;
int i;
@@ -217,11 +214,11 @@ uint32_t siphash_32b(const uint8_t *in, const uint64_t *k)
* @in: Input data (two addresses, two ports)
* @k: Hash function key, 128 bits
*
- * Return: 32 bits obtained by XORing the two halves of the 64-bit hash output
+ * Return: the 64-bit hash output
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
-uint32_t siphash_36b(const uint8_t *in, const uint64_t *k)
+uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
int i;
@@ -239,5 +236,5 @@ uint32_t siphash_36b(const uint8_t *in, const uint64_t *k)
b |= *in32;
POSTAMBLE;
- return ret;
+ return b;
}
diff --git a/siphash.h b/siphash.h
index 5b0d0c3..de04c56 100644
--- a/siphash.h
+++ b/siphash.h
@@ -7,9 +7,9 @@
#define SIPHASH_H
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k);
-uint32_t siphash_12b(const uint8_t *in, const uint64_t *k);
+uint64_t siphash_12b(const uint8_t *in, const uint64_t *k);
uint64_t siphash_20b(const uint8_t *in, const uint64_t *k);
-uint32_t siphash_32b(const uint8_t *in, const uint64_t *k);
-uint32_t siphash_36b(const uint8_t *in, const uint64_t *k);
+uint64_t siphash_32b(const uint8_t *in, const uint64_t *k);
+uint64_t siphash_36b(const uint8_t *in, const uint64_t *k);
#endif /* SIPHASH_H */
diff --git a/tcp.c b/tcp.c
index 2933123..19baba5 100644
--- a/tcp.c
+++ b/tcp.c
@@ -1826,7 +1826,8 @@ static void tcp_seq_init(const struct ctx *c, struct tcp_tap_conn *conn,
.srcport = conn->fport,
.dstport = conn->eport,
};
- uint32_t ns, seq = 0;
+ uint64_t hash;
+ uint32_t ns;
if (CONN_V4(conn))
inany_from_af(&aany, AF_INET, &c->ip4.addr);
@@ -1834,12 +1835,12 @@ static void tcp_seq_init(const struct ctx *c, struct tcp_tap_conn *conn,
inany_from_af(&aany, AF_INET6, &c->ip6.addr);
in.dst = aany;
- seq = siphash_36b((uint8_t *)&in, c->tcp.hash_secret);
+ hash = siphash_36b((uint8_t *)&in, c->tcp.hash_secret);
/* 32ns ticks, overflows 32 bits every 137s */
ns = (now->tv_sec * 1000000000 + now->tv_nsec) >> 5;
- conn->seq_to_tap = seq + ns;
+ conn->seq_to_tap = ((uint32_t)(hash >> 32) ^ (uint32_t)hash) + ns;
}
/**
--
@@ -1826,7 +1826,8 @@ static void tcp_seq_init(const struct ctx *c, struct tcp_tap_conn *conn,
.srcport = conn->fport,
.dstport = conn->eport,
};
- uint32_t ns, seq = 0;
+ uint64_t hash;
+ uint32_t ns;
if (CONN_V4(conn))
inany_from_af(&aany, AF_INET, &c->ip4.addr);
@@ -1834,12 +1835,12 @@ static void tcp_seq_init(const struct ctx *c, struct tcp_tap_conn *conn,
inany_from_af(&aany, AF_INET6, &c->ip6.addr);
in.dst = aany;
- seq = siphash_36b((uint8_t *)&in, c->tcp.hash_secret);
+ hash = siphash_36b((uint8_t *)&in, c->tcp.hash_secret);
/* 32ns ticks, overflows 32 bits every 137s */
ns = (now->tv_sec * 1000000000 + now->tv_nsec) >> 5;
- conn->seq_to_tap = seq + ns;
+ conn->seq_to_tap = ((uint32_t)(hash >> 32) ^ (uint32_t)hash) + ns;
}
/**
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v2 02/10] siphash: Make sip round calculations an inline function rather than macro
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
2023-09-28 1:20 ` [PATCH v2 01/10] siphash: Make siphash functions consistently return 64-bit results David Gibson
@ 2023-09-28 1:20 ` David Gibson
2023-09-28 1:20 ` [PATCH v2 03/10] siphash: Add siphash_feed() helper David Gibson
` (8 subsequent siblings)
10 siblings, 0 replies; 12+ messages in thread
From: David Gibson @ 2023-09-28 1:20 UTC (permalink / raw)
To: Stefano Brivio, passt-dev; +Cc: David Gibson
The SIPROUND(n) macro implements n rounds of SipHash shuffling. It relies
on 'v' and '__i' variables being available in the context it's used in,
which isn't great hygiene. Replace it with an inline function instead.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
siphash.c | 51 +++++++++++++++++++++++++++++----------------------
1 file changed, 29 insertions(+), 22 deletions(-)
diff --git a/siphash.c b/siphash.c
index 20009fe..e1fcf18 100644
--- a/siphash.c
+++ b/siphash.c
@@ -68,29 +68,36 @@
v[__i] = k[__i % 2]; \
} while (0)
-#define SIPROUND(n) \
- do { \
- for (__i = 0; __i < (n); __i++) { \
- v[0] += v[1]; \
- v[1] = ROTL(v[1], 13) ^ v[0]; \
- v[0] = ROTL(v[0], 32); \
- v[2] += v[3]; \
- v[3] = ROTL(v[3], 16) ^ v[2]; \
- v[0] += v[3]; \
- v[3] = ROTL(v[3], 21) ^ v[0]; \
- v[2] += v[1]; \
- v[1] = ROTL(v[1], 17) ^ v[2]; \
- v[2] = ROTL(v[2], 32); \
- } \
- } while (0)
+/**
+ * sipround() - Perform rounds of SipHash scrambling
+ * @v: siphash state (4 x 64-bit integers)
+ * @n: Number of rounds to apply
+ */
+static inline void sipround(uint64_t *v, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ v[0] += v[1];
+ v[1] = ROTL(v[1], 13) ^ v[0];
+ v[0] = ROTL(v[0], 32);
+ v[2] += v[3];
+ v[3] = ROTL(v[3], 16) ^ v[2];
+ v[0] += v[3];
+ v[3] = ROTL(v[3], 21) ^ v[0];
+ v[2] += v[1];
+ v[1] = ROTL(v[1], 17) ^ v[2];
+ v[2] = ROTL(v[2], 32);
+ }
+}
#define POSTAMBLE \
do { \
v[3] ^= b; \
- SIPROUND(2); \
+ sipround(v, 2); \
v[0] ^= b; \
v[2] ^= 0xff; \
- SIPROUND(4); \
+ sipround(v, 4); \
b = (v[0] ^ v[1]) ^ (v[2] ^ v[3]); \
} while (0)
@@ -117,7 +124,7 @@ uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
PREAMBLE(8);
v[3] ^= *(uint64_t *)in;
- SIPROUND(2);
+ sipround(v, 2);
v[0] ^= *(uint64_t *)in;
POSTAMBLE;
@@ -143,7 +150,7 @@ uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
PREAMBLE(12);
v[3] ^= combined;
- SIPROUND(2);
+ sipround(v, 2);
v[0] ^= combined;
b |= *(in32 + 2);
POSTAMBLE;
@@ -171,7 +178,7 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
v[3] ^= combined;
- SIPROUND(2);
+ sipround(v, 2);
v[0] ^= combined;
}
@@ -200,7 +207,7 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
for (i = 0; i < 4; i++, in64++) {
v[3] ^= *in64;
- SIPROUND(2);
+ sipround(v, 2);
v[0] ^= *in64;
}
@@ -229,7 +236,7 @@ uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
v[3] ^= combined;
- SIPROUND(2);
+ sipround(v, 2);
v[0] ^= combined;
}
--
@@ -68,29 +68,36 @@
v[__i] = k[__i % 2]; \
} while (0)
-#define SIPROUND(n) \
- do { \
- for (__i = 0; __i < (n); __i++) { \
- v[0] += v[1]; \
- v[1] = ROTL(v[1], 13) ^ v[0]; \
- v[0] = ROTL(v[0], 32); \
- v[2] += v[3]; \
- v[3] = ROTL(v[3], 16) ^ v[2]; \
- v[0] += v[3]; \
- v[3] = ROTL(v[3], 21) ^ v[0]; \
- v[2] += v[1]; \
- v[1] = ROTL(v[1], 17) ^ v[2]; \
- v[2] = ROTL(v[2], 32); \
- } \
- } while (0)
+/**
+ * sipround() - Perform rounds of SipHash scrambling
+ * @v: siphash state (4 x 64-bit integers)
+ * @n: Number of rounds to apply
+ */
+static inline void sipround(uint64_t *v, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ v[0] += v[1];
+ v[1] = ROTL(v[1], 13) ^ v[0];
+ v[0] = ROTL(v[0], 32);
+ v[2] += v[3];
+ v[3] = ROTL(v[3], 16) ^ v[2];
+ v[0] += v[3];
+ v[3] = ROTL(v[3], 21) ^ v[0];
+ v[2] += v[1];
+ v[1] = ROTL(v[1], 17) ^ v[2];
+ v[2] = ROTL(v[2], 32);
+ }
+}
#define POSTAMBLE \
do { \
v[3] ^= b; \
- SIPROUND(2); \
+ sipround(v, 2); \
v[0] ^= b; \
v[2] ^= 0xff; \
- SIPROUND(4); \
+ sipround(v, 4); \
b = (v[0] ^ v[1]) ^ (v[2] ^ v[3]); \
} while (0)
@@ -117,7 +124,7 @@ uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
PREAMBLE(8);
v[3] ^= *(uint64_t *)in;
- SIPROUND(2);
+ sipround(v, 2);
v[0] ^= *(uint64_t *)in;
POSTAMBLE;
@@ -143,7 +150,7 @@ uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
PREAMBLE(12);
v[3] ^= combined;
- SIPROUND(2);
+ sipround(v, 2);
v[0] ^= combined;
b |= *(in32 + 2);
POSTAMBLE;
@@ -171,7 +178,7 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
v[3] ^= combined;
- SIPROUND(2);
+ sipround(v, 2);
v[0] ^= combined;
}
@@ -200,7 +207,7 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
for (i = 0; i < 4; i++, in64++) {
v[3] ^= *in64;
- SIPROUND(2);
+ sipround(v, 2);
v[0] ^= *in64;
}
@@ -229,7 +236,7 @@ uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
v[3] ^= combined;
- SIPROUND(2);
+ sipround(v, 2);
v[0] ^= combined;
}
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v2 03/10] siphash: Add siphash_feed() helper
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
2023-09-28 1:20 ` [PATCH v2 01/10] siphash: Make siphash functions consistently return 64-bit results David Gibson
2023-09-28 1:20 ` [PATCH v2 02/10] siphash: Make sip round calculations an inline function rather than macro David Gibson
@ 2023-09-28 1:20 ` David Gibson
2023-09-28 1:20 ` [PATCH v2 04/10] siphash: Clean up hash finalisation with posthash_final() function David Gibson
` (7 subsequent siblings)
10 siblings, 0 replies; 12+ messages in thread
From: David Gibson @ 2023-09-28 1:20 UTC (permalink / raw)
To: Stefano Brivio, passt-dev; +Cc: David Gibson
We have macros or inlines for a number of common operations in the siphash
functions. However, in a number of places we still open code feeding
another 64-bits of data into the hash function: an xor, followed by 2
rounds of shuffling, followed by another xor.
Implement an inline function for this, which results in somewhat shortened
code.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
siphash.c | 52 +++++++++++++++++++++-------------------------------
1 file changed, 21 insertions(+), 31 deletions(-)
diff --git a/siphash.c b/siphash.c
index e1fcf18..716ab62 100644
--- a/siphash.c
+++ b/siphash.c
@@ -91,11 +91,21 @@ static inline void sipround(uint64_t *v, int n)
}
}
+/**
+ * siphash_feed() - Fold 64-bits of data into the hash state
+ * @v: siphash state (4 x 64-bit integers)
+ * @in: New value to fold into hash
+ */
+static inline void siphash_feed(uint64_t *v, uint64_t in)
+{
+ v[3] ^= in;
+ sipround(v, 2);
+ v[0] ^= in;
+}
+
#define POSTAMBLE \
do { \
- v[3] ^= b; \
- sipround(v, 2); \
- v[0] ^= b; \
+ siphash_feed(v, b); \
v[2] ^= 0xff; \
sipround(v, 4); \
b = (v[0] ^ v[1]) ^ (v[2] ^ v[3]); \
@@ -123,9 +133,7 @@ __attribute__((optimize("-fno-strict-aliasing")))
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
PREAMBLE(8);
- v[3] ^= *(uint64_t *)in;
- sipround(v, 2);
- v[0] ^= *(uint64_t *)in;
+ siphash_feed(v, *(uint64_t *)in);
POSTAMBLE;
return b;
@@ -144,14 +152,9 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
- uint64_t combined;
-
- combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
PREAMBLE(12);
- v[3] ^= combined;
- sipround(v, 2);
- v[0] ^= combined;
+ siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
b |= *(in32 + 2);
POSTAMBLE;
@@ -174,13 +177,8 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
PREAMBLE(20);
- for (i = 0; i < 2; i++, in32 += 2) {
- uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
-
- v[3] ^= combined;
- sipround(v, 2);
- v[0] ^= combined;
- }
+ for (i = 0; i < 2; i++, in32 += 2)
+ siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
b |= *in32;
POSTAMBLE;
@@ -205,11 +203,8 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
PREAMBLE(32);
- for (i = 0; i < 4; i++, in64++) {
- v[3] ^= *in64;
- sipround(v, 2);
- v[0] ^= *in64;
- }
+ for (i = 0; i < 4; i++, in64++)
+ siphash_feed(v, *in64);
POSTAMBLE;
@@ -232,13 +227,8 @@ uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
PREAMBLE(36);
- for (i = 0; i < 4; i++, in32 += 2) {
- uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
-
- v[3] ^= combined;
- sipround(v, 2);
- v[0] ^= combined;
- }
+ for (i = 0; i < 4; i++, in32 += 2)
+ siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
b |= *in32;
POSTAMBLE;
--
@@ -91,11 +91,21 @@ static inline void sipround(uint64_t *v, int n)
}
}
+/**
+ * siphash_feed() - Fold 64-bits of data into the hash state
+ * @v: siphash state (4 x 64-bit integers)
+ * @in: New value to fold into hash
+ */
+static inline void siphash_feed(uint64_t *v, uint64_t in)
+{
+ v[3] ^= in;
+ sipround(v, 2);
+ v[0] ^= in;
+}
+
#define POSTAMBLE \
do { \
- v[3] ^= b; \
- sipround(v, 2); \
- v[0] ^= b; \
+ siphash_feed(v, b); \
v[2] ^= 0xff; \
sipround(v, 4); \
b = (v[0] ^ v[1]) ^ (v[2] ^ v[3]); \
@@ -123,9 +133,7 @@ __attribute__((optimize("-fno-strict-aliasing")))
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
PREAMBLE(8);
- v[3] ^= *(uint64_t *)in;
- sipround(v, 2);
- v[0] ^= *(uint64_t *)in;
+ siphash_feed(v, *(uint64_t *)in);
POSTAMBLE;
return b;
@@ -144,14 +152,9 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
- uint64_t combined;
-
- combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
PREAMBLE(12);
- v[3] ^= combined;
- sipround(v, 2);
- v[0] ^= combined;
+ siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
b |= *(in32 + 2);
POSTAMBLE;
@@ -174,13 +177,8 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
PREAMBLE(20);
- for (i = 0; i < 2; i++, in32 += 2) {
- uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
-
- v[3] ^= combined;
- sipround(v, 2);
- v[0] ^= combined;
- }
+ for (i = 0; i < 2; i++, in32 += 2)
+ siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
b |= *in32;
POSTAMBLE;
@@ -205,11 +203,8 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
PREAMBLE(32);
- for (i = 0; i < 4; i++, in64++) {
- v[3] ^= *in64;
- sipround(v, 2);
- v[0] ^= *in64;
- }
+ for (i = 0; i < 4; i++, in64++)
+ siphash_feed(v, *in64);
POSTAMBLE;
@@ -232,13 +227,8 @@ uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
PREAMBLE(36);
- for (i = 0; i < 4; i++, in32 += 2) {
- uint64_t combined = (uint64_t)(*(in32 + 1)) << 32 | *in32;
-
- v[3] ^= combined;
- sipround(v, 2);
- v[0] ^= combined;
- }
+ for (i = 0; i < 4; i++, in32 += 2)
+ siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
b |= *in32;
POSTAMBLE;
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v2 04/10] siphash: Clean up hash finalisation with posthash_final() function
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
` (2 preceding siblings ...)
2023-09-28 1:20 ` [PATCH v2 03/10] siphash: Add siphash_feed() helper David Gibson
@ 2023-09-28 1:20 ` David Gibson
2023-09-28 1:20 ` [PATCH v2 05/10] siphash: Fix bug in state initialisation David Gibson
` (6 subsequent siblings)
10 siblings, 0 replies; 12+ messages in thread
From: David Gibson @ 2023-09-28 1:20 UTC (permalink / raw)
To: Stefano Brivio, passt-dev; +Cc: David Gibson
The POSTAMBLE macro implements the finalisation steps of SipHash. It
relies on some variables in the environment, including returning the final
hash value that way. That isn't great hygiene.
In addition the PREAMBLE macro takes a length parameter which is used only
to initialize the 'b' value that's not used until the finalisation and is
also sometimes modified in a non-obvious way by the callers.
The 'b' value is always composed from the total length of the hash input
plus up to 7 bytes of "tail" data - that is the remainder of the hash input
after a multiple of 8 bytes has been consumed.
Simplify all this by replacing the POSTAMBLE macro with a siphash_final()
function which takes the length and tail data as parameters and returns the
final hash value.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
siphash.c | 58 +++++++++++++++++++++++++++----------------------------
1 file changed, 28 insertions(+), 30 deletions(-)
diff --git a/siphash.c b/siphash.c
index 716ab62..ec39793 100644
--- a/siphash.c
+++ b/siphash.c
@@ -51,16 +51,16 @@
*
*/
+#include <stddef.h>
#include <stdint.h>
#include "siphash.h"
#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
-#define PREAMBLE(len) \
+#define PREAMBLE \
uint64_t v[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL, \
0x6c7967656e657261ULL, 0x7465646279746573ULL }; \
- uint64_t b = (uint64_t)(len) << 56; \
int __i; \
\
do { \
@@ -103,13 +103,21 @@ static inline void siphash_feed(uint64_t *v, uint64_t in)
v[0] ^= in;
}
-#define POSTAMBLE \
- do { \
- siphash_feed(v, b); \
- v[2] ^= 0xff; \
- sipround(v, 4); \
- b = (v[0] ^ v[1]) ^ (v[2] ^ v[3]); \
- } while (0)
+/**
+ * siphash_final - Finalize SipHash calculations
+ * @v: siphash state (4 x 64-bit integers)
+ * @len: Total length of input data
+ * @tail: Final data for the hash (<= 7 bytes)
+ */
+static inline uint64_t siphash_final(uint64_t *v, size_t len, uint64_t tail)
+{
+ uint64_t b = (uint64_t)(len) << 56 | tail;
+
+ siphash_feed(v, b);
+ v[2] ^= 0xff;
+ sipround(v, 4);
+ return v[0] ^ v[1] ^ v[2] ^ v[3];
+}
/**
* siphash_8b() - Table index or timestamp offset for TCP over IPv4 (8 bytes in)
@@ -132,11 +140,11 @@ __attribute__((optimize("-fno-strict-aliasing")))
/* cppcheck-suppress unusedFunction */
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
- PREAMBLE(8);
+ PREAMBLE;
siphash_feed(v, *(uint64_t *)in);
- POSTAMBLE;
- return b;
+
+ return siphash_final(v, 8, 0);
}
/**
@@ -153,12 +161,10 @@ uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
- PREAMBLE(12);
+ PREAMBLE;
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- b |= *(in32 + 2);
- POSTAMBLE;
- return b;
+ return siphash_final(v, 12, *(in32 + 2));
}
/**
@@ -175,15 +181,12 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
uint32_t *in32 = (uint32_t *)in;
int i;
- PREAMBLE(20);
+ PREAMBLE;
for (i = 0; i < 2; i++, in32 += 2)
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- b |= *in32;
- POSTAMBLE;
-
- return b;
+ return siphash_final(v, 20, *in32);
}
/**
@@ -201,14 +204,12 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
uint64_t *in64 = (uint64_t *)in;
int i;
- PREAMBLE(32);
+ PREAMBLE;
for (i = 0; i < 4; i++, in64++)
siphash_feed(v, *in64);
- POSTAMBLE;
-
- return b;
+ return siphash_final(v, 32, 0);
}
/**
@@ -225,13 +226,10 @@ uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
uint32_t *in32 = (uint32_t *)in;
int i;
- PREAMBLE(36);
+ PREAMBLE;
for (i = 0; i < 4; i++, in32 += 2)
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- b |= *in32;
- POSTAMBLE;
-
- return b;
+ return siphash_final(v, 36, *in32);
}
--
@@ -51,16 +51,16 @@
*
*/
+#include <stddef.h>
#include <stdint.h>
#include "siphash.h"
#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
-#define PREAMBLE(len) \
+#define PREAMBLE \
uint64_t v[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL, \
0x6c7967656e657261ULL, 0x7465646279746573ULL }; \
- uint64_t b = (uint64_t)(len) << 56; \
int __i; \
\
do { \
@@ -103,13 +103,21 @@ static inline void siphash_feed(uint64_t *v, uint64_t in)
v[0] ^= in;
}
-#define POSTAMBLE \
- do { \
- siphash_feed(v, b); \
- v[2] ^= 0xff; \
- sipround(v, 4); \
- b = (v[0] ^ v[1]) ^ (v[2] ^ v[3]); \
- } while (0)
+/**
+ * siphash_final - Finalize SipHash calculations
+ * @v: siphash state (4 x 64-bit integers)
+ * @len: Total length of input data
+ * @tail: Final data for the hash (<= 7 bytes)
+ */
+static inline uint64_t siphash_final(uint64_t *v, size_t len, uint64_t tail)
+{
+ uint64_t b = (uint64_t)(len) << 56 | tail;
+
+ siphash_feed(v, b);
+ v[2] ^= 0xff;
+ sipround(v, 4);
+ return v[0] ^ v[1] ^ v[2] ^ v[3];
+}
/**
* siphash_8b() - Table index or timestamp offset for TCP over IPv4 (8 bytes in)
@@ -132,11 +140,11 @@ __attribute__((optimize("-fno-strict-aliasing")))
/* cppcheck-suppress unusedFunction */
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
- PREAMBLE(8);
+ PREAMBLE;
siphash_feed(v, *(uint64_t *)in);
- POSTAMBLE;
- return b;
+
+ return siphash_final(v, 8, 0);
}
/**
@@ -153,12 +161,10 @@ uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
- PREAMBLE(12);
+ PREAMBLE;
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- b |= *(in32 + 2);
- POSTAMBLE;
- return b;
+ return siphash_final(v, 12, *(in32 + 2));
}
/**
@@ -175,15 +181,12 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
uint32_t *in32 = (uint32_t *)in;
int i;
- PREAMBLE(20);
+ PREAMBLE;
for (i = 0; i < 2; i++, in32 += 2)
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- b |= *in32;
- POSTAMBLE;
-
- return b;
+ return siphash_final(v, 20, *in32);
}
/**
@@ -201,14 +204,12 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
uint64_t *in64 = (uint64_t *)in;
int i;
- PREAMBLE(32);
+ PREAMBLE;
for (i = 0; i < 4; i++, in64++)
siphash_feed(v, *in64);
- POSTAMBLE;
-
- return b;
+ return siphash_final(v, 32, 0);
}
/**
@@ -225,13 +226,10 @@ uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
uint32_t *in32 = (uint32_t *)in;
int i;
- PREAMBLE(36);
+ PREAMBLE;
for (i = 0; i < 4; i++, in32 += 2)
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- b |= *in32;
- POSTAMBLE;
-
- return b;
+ return siphash_final(v, 36, *in32);
}
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v2 05/10] siphash: Fix bug in state initialisation
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
` (3 preceding siblings ...)
2023-09-28 1:20 ` [PATCH v2 04/10] siphash: Clean up hash finalisation with posthash_final() function David Gibson
@ 2023-09-28 1:20 ` David Gibson
2023-09-28 1:20 ` [PATCH v2 06/10] siphash: Use more hygienic state initialiser David Gibson
` (5 subsequent siblings)
10 siblings, 0 replies; 12+ messages in thread
From: David Gibson @ 2023-09-28 1:20 UTC (permalink / raw)
To: Stefano Brivio, passt-dev; +Cc: David Gibson
The SipHash algorithm starts with initializing the 32 bytes of internal
state with some magic numbers XORed with the hash key. However, our
implementation has a bug - rather than XORing the hash key, it *sets* the
initial state to copies of the key.
I don't know if that affects any of the cryptographic properties of SipHash
but it's not what we should be doing. Fix it.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
siphash.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/siphash.c b/siphash.c
index ec39793..6932da2 100644
--- a/siphash.c
+++ b/siphash.c
@@ -65,7 +65,7 @@
\
do { \
for (__i = sizeof(v) / sizeof(v[0]) - 1; __i >= 0; __i--) \
- v[__i] = k[__i % 2]; \
+ v[__i] ^= k[__i % 2]; \
} while (0)
/**
--
@@ -65,7 +65,7 @@
\
do { \
for (__i = sizeof(v) / sizeof(v[0]) - 1; __i >= 0; __i--) \
- v[__i] = k[__i % 2]; \
+ v[__i] ^= k[__i % 2]; \
} while (0)
/**
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v2 06/10] siphash: Use more hygienic state initialiser
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
` (4 preceding siblings ...)
2023-09-28 1:20 ` [PATCH v2 05/10] siphash: Fix bug in state initialisation David Gibson
@ 2023-09-28 1:20 ` David Gibson
2023-09-28 1:20 ` [PATCH v2 07/10] siphash: Use specific structure for internal state David Gibson
` (4 subsequent siblings)
10 siblings, 0 replies; 12+ messages in thread
From: David Gibson @ 2023-09-28 1:20 UTC (permalink / raw)
To: Stefano Brivio, passt-dev; +Cc: David Gibson
The PREAMBLE macro sets up the SipHash initial internal state. It also
defines that state as a variable, which isn't macro hygienic. With
previous changes simplifying this preamble, it's now possible to replace it
with a macro which simply expands to a C initialiser for that state.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
siphash.c | 29 ++++++++++++-----------------
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/siphash.c b/siphash.c
index 6932da2..21c560d 100644
--- a/siphash.c
+++ b/siphash.c
@@ -58,15 +58,12 @@
#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
-#define PREAMBLE \
- uint64_t v[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL, \
- 0x6c7967656e657261ULL, 0x7465646279746573ULL }; \
- int __i; \
- \
- do { \
- for (__i = sizeof(v) / sizeof(v[0]) - 1; __i >= 0; __i--) \
- v[__i] ^= k[__i % 2]; \
- } while (0)
+#define SIPHASH_INIT(k) { \
+ 0x736f6d6570736575ULL ^ (k)[0], \
+ 0x646f72616e646f6dULL ^ (k)[1], \
+ 0x6c7967656e657261ULL ^ (k)[0], \
+ 0x7465646279746573ULL ^ (k)[1] \
+ }
/**
* sipround() - Perform rounds of SipHash scrambling
@@ -140,7 +137,8 @@ __attribute__((optimize("-fno-strict-aliasing")))
/* cppcheck-suppress unusedFunction */
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
- PREAMBLE;
+ uint64_t v[4] = SIPHASH_INIT(k);
+
siphash_feed(v, *(uint64_t *)in);
@@ -160,8 +158,8 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
+ uint64_t v[4] = SIPHASH_INIT(k);
- PREAMBLE;
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
return siphash_final(v, 12, *(in32 + 2));
@@ -179,10 +177,9 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
+ uint64_t v[4] = SIPHASH_INIT(k);
int i;
- PREAMBLE;
-
for (i = 0; i < 2; i++, in32 += 2)
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
@@ -202,10 +199,9 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
{
uint64_t *in64 = (uint64_t *)in;
+ uint64_t v[4] = SIPHASH_INIT(k);
int i;
- PREAMBLE;
-
for (i = 0; i < 4; i++, in64++)
siphash_feed(v, *in64);
@@ -224,10 +220,9 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
+ uint64_t v[4] = SIPHASH_INIT(k);
int i;
- PREAMBLE;
-
for (i = 0; i < 4; i++, in32 += 2)
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
--
@@ -58,15 +58,12 @@
#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
-#define PREAMBLE \
- uint64_t v[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL, \
- 0x6c7967656e657261ULL, 0x7465646279746573ULL }; \
- int __i; \
- \
- do { \
- for (__i = sizeof(v) / sizeof(v[0]) - 1; __i >= 0; __i--) \
- v[__i] ^= k[__i % 2]; \
- } while (0)
+#define SIPHASH_INIT(k) { \
+ 0x736f6d6570736575ULL ^ (k)[0], \
+ 0x646f72616e646f6dULL ^ (k)[1], \
+ 0x6c7967656e657261ULL ^ (k)[0], \
+ 0x7465646279746573ULL ^ (k)[1] \
+ }
/**
* sipround() - Perform rounds of SipHash scrambling
@@ -140,7 +137,8 @@ __attribute__((optimize("-fno-strict-aliasing")))
/* cppcheck-suppress unusedFunction */
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
- PREAMBLE;
+ uint64_t v[4] = SIPHASH_INIT(k);
+
siphash_feed(v, *(uint64_t *)in);
@@ -160,8 +158,8 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
+ uint64_t v[4] = SIPHASH_INIT(k);
- PREAMBLE;
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
return siphash_final(v, 12, *(in32 + 2));
@@ -179,10 +177,9 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
+ uint64_t v[4] = SIPHASH_INIT(k);
int i;
- PREAMBLE;
-
for (i = 0; i < 2; i++, in32 += 2)
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
@@ -202,10 +199,9 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
{
uint64_t *in64 = (uint64_t *)in;
+ uint64_t v[4] = SIPHASH_INIT(k);
int i;
- PREAMBLE;
-
for (i = 0; i < 4; i++, in64++)
siphash_feed(v, *in64);
@@ -224,10 +220,9 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
{
uint32_t *in32 = (uint32_t *)in;
+ uint64_t v[4] = SIPHASH_INIT(k);
int i;
- PREAMBLE;
-
for (i = 0; i < 4; i++, in32 += 2)
siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v2 07/10] siphash: Use specific structure for internal state
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
` (5 preceding siblings ...)
2023-09-28 1:20 ` [PATCH v2 06/10] siphash: Use more hygienic state initialiser David Gibson
@ 2023-09-28 1:20 ` David Gibson
2023-09-28 1:21 ` [PATCH v2 08/10] siphash: Make internal helpers public David Gibson
` (3 subsequent siblings)
10 siblings, 0 replies; 12+ messages in thread
From: David Gibson @ 2023-09-28 1:20 UTC (permalink / raw)
To: Stefano Brivio, passt-dev; +Cc: David Gibson
To improve type safety, encapsulate the internal state of the SipHash
algorithm into a dedicated structure type.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
siphash.c | 80 +++++++++++++++++++++++++++++--------------------------
1 file changed, 42 insertions(+), 38 deletions(-)
diff --git a/siphash.c b/siphash.c
index 21c560d..66174c7 100644
--- a/siphash.c
+++ b/siphash.c
@@ -58,33 +58,37 @@
#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
-#define SIPHASH_INIT(k) { \
+struct siphash_state {
+ uint64_t v[4];
+};
+
+#define SIPHASH_INIT(k) { { \
0x736f6d6570736575ULL ^ (k)[0], \
0x646f72616e646f6dULL ^ (k)[1], \
0x6c7967656e657261ULL ^ (k)[0], \
0x7465646279746573ULL ^ (k)[1] \
- }
+ } }
/**
* sipround() - Perform rounds of SipHash scrambling
* @v: siphash state (4 x 64-bit integers)
* @n: Number of rounds to apply
*/
-static inline void sipround(uint64_t *v, int n)
+static inline void sipround(struct siphash_state *state, int n)
{
int i;
for (i = 0; i < n; i++) {
- v[0] += v[1];
- v[1] = ROTL(v[1], 13) ^ v[0];
- v[0] = ROTL(v[0], 32);
- v[2] += v[3];
- v[3] = ROTL(v[3], 16) ^ v[2];
- v[0] += v[3];
- v[3] = ROTL(v[3], 21) ^ v[0];
- v[2] += v[1];
- v[1] = ROTL(v[1], 17) ^ v[2];
- v[2] = ROTL(v[2], 32);
+ state->v[0] += state->v[1];
+ state->v[1] = ROTL(state->v[1], 13) ^ state->v[0];
+ state->v[0] = ROTL(state->v[0], 32);
+ state->v[2] += state->v[3];
+ state->v[3] = ROTL(state->v[3], 16) ^ state->v[2];
+ state->v[0] += state->v[3];
+ state->v[3] = ROTL(state->v[3], 21) ^ state->v[0];
+ state->v[2] += state->v[1];
+ state->v[1] = ROTL(state->v[1], 17) ^ state->v[2];
+ state->v[2] = ROTL(state->v[2], 32);
}
}
@@ -93,11 +97,11 @@ static inline void sipround(uint64_t *v, int n)
* @v: siphash state (4 x 64-bit integers)
* @in: New value to fold into hash
*/
-static inline void siphash_feed(uint64_t *v, uint64_t in)
+static inline void siphash_feed(struct siphash_state *state, uint64_t in)
{
- v[3] ^= in;
- sipround(v, 2);
- v[0] ^= in;
+ state->v[3] ^= in;
+ sipround(state, 2);
+ state->v[0] ^= in;
}
/**
@@ -106,14 +110,15 @@ static inline void siphash_feed(uint64_t *v, uint64_t in)
* @len: Total length of input data
* @tail: Final data for the hash (<= 7 bytes)
*/
-static inline uint64_t siphash_final(uint64_t *v, size_t len, uint64_t tail)
+static inline uint64_t siphash_final(struct siphash_state *state,
+ size_t len, uint64_t tail)
{
uint64_t b = (uint64_t)(len) << 56 | tail;
- siphash_feed(v, b);
- v[2] ^= 0xff;
- sipround(v, 4);
- return v[0] ^ v[1] ^ v[2] ^ v[3];
+ siphash_feed(state, b);
+ state->v[2] ^= 0xff;
+ sipround(state, 4);
+ return state->v[0] ^ state->v[1] ^ state->v[2] ^ state->v[3];
}
/**
@@ -137,12 +142,11 @@ __attribute__((optimize("-fno-strict-aliasing")))
/* cppcheck-suppress unusedFunction */
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
- uint64_t v[4] = SIPHASH_INIT(k);
-
- siphash_feed(v, *(uint64_t *)in);
+ struct siphash_state state = SIPHASH_INIT(k);
+ siphash_feed(&state, *(uint64_t *)in);
- return siphash_final(v, 8, 0);
+ return siphash_final(&state, 8, 0);
}
/**
@@ -157,12 +161,12 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
/* cppcheck-suppress unusedFunction */
uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
+ struct siphash_state state = SIPHASH_INIT(k);
uint32_t *in32 = (uint32_t *)in;
- uint64_t v[4] = SIPHASH_INIT(k);
- siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
+ siphash_feed(&state, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- return siphash_final(v, 12, *(in32 + 2));
+ return siphash_final(&state, 12, *(in32 + 2));
}
/**
@@ -176,14 +180,14 @@ uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
{
+ struct siphash_state state = SIPHASH_INIT(k);
uint32_t *in32 = (uint32_t *)in;
- uint64_t v[4] = SIPHASH_INIT(k);
int i;
for (i = 0; i < 2; i++, in32 += 2)
- siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
+ siphash_feed(&state, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- return siphash_final(v, 20, *in32);
+ return siphash_final(&state, 20, *in32);
}
/**
@@ -198,14 +202,14 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
/* cppcheck-suppress unusedFunction */
uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
{
+ struct siphash_state state = SIPHASH_INIT(k);
uint64_t *in64 = (uint64_t *)in;
- uint64_t v[4] = SIPHASH_INIT(k);
int i;
for (i = 0; i < 4; i++, in64++)
- siphash_feed(v, *in64);
+ siphash_feed(&state, *in64);
- return siphash_final(v, 32, 0);
+ return siphash_final(&state, 32, 0);
}
/**
@@ -219,12 +223,12 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
{
+ struct siphash_state state = SIPHASH_INIT(k);
uint32_t *in32 = (uint32_t *)in;
- uint64_t v[4] = SIPHASH_INIT(k);
int i;
for (i = 0; i < 4; i++, in32 += 2)
- siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
+ siphash_feed(&state, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- return siphash_final(v, 36, *in32);
+ return siphash_final(&state, 36, *in32);
}
--
@@ -58,33 +58,37 @@
#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
-#define SIPHASH_INIT(k) { \
+struct siphash_state {
+ uint64_t v[4];
+};
+
+#define SIPHASH_INIT(k) { { \
0x736f6d6570736575ULL ^ (k)[0], \
0x646f72616e646f6dULL ^ (k)[1], \
0x6c7967656e657261ULL ^ (k)[0], \
0x7465646279746573ULL ^ (k)[1] \
- }
+ } }
/**
* sipround() - Perform rounds of SipHash scrambling
* @v: siphash state (4 x 64-bit integers)
* @n: Number of rounds to apply
*/
-static inline void sipround(uint64_t *v, int n)
+static inline void sipround(struct siphash_state *state, int n)
{
int i;
for (i = 0; i < n; i++) {
- v[0] += v[1];
- v[1] = ROTL(v[1], 13) ^ v[0];
- v[0] = ROTL(v[0], 32);
- v[2] += v[3];
- v[3] = ROTL(v[3], 16) ^ v[2];
- v[0] += v[3];
- v[3] = ROTL(v[3], 21) ^ v[0];
- v[2] += v[1];
- v[1] = ROTL(v[1], 17) ^ v[2];
- v[2] = ROTL(v[2], 32);
+ state->v[0] += state->v[1];
+ state->v[1] = ROTL(state->v[1], 13) ^ state->v[0];
+ state->v[0] = ROTL(state->v[0], 32);
+ state->v[2] += state->v[3];
+ state->v[3] = ROTL(state->v[3], 16) ^ state->v[2];
+ state->v[0] += state->v[3];
+ state->v[3] = ROTL(state->v[3], 21) ^ state->v[0];
+ state->v[2] += state->v[1];
+ state->v[1] = ROTL(state->v[1], 17) ^ state->v[2];
+ state->v[2] = ROTL(state->v[2], 32);
}
}
@@ -93,11 +97,11 @@ static inline void sipround(uint64_t *v, int n)
* @v: siphash state (4 x 64-bit integers)
* @in: New value to fold into hash
*/
-static inline void siphash_feed(uint64_t *v, uint64_t in)
+static inline void siphash_feed(struct siphash_state *state, uint64_t in)
{
- v[3] ^= in;
- sipround(v, 2);
- v[0] ^= in;
+ state->v[3] ^= in;
+ sipround(state, 2);
+ state->v[0] ^= in;
}
/**
@@ -106,14 +110,15 @@ static inline void siphash_feed(uint64_t *v, uint64_t in)
* @len: Total length of input data
* @tail: Final data for the hash (<= 7 bytes)
*/
-static inline uint64_t siphash_final(uint64_t *v, size_t len, uint64_t tail)
+static inline uint64_t siphash_final(struct siphash_state *state,
+ size_t len, uint64_t tail)
{
uint64_t b = (uint64_t)(len) << 56 | tail;
- siphash_feed(v, b);
- v[2] ^= 0xff;
- sipround(v, 4);
- return v[0] ^ v[1] ^ v[2] ^ v[3];
+ siphash_feed(state, b);
+ state->v[2] ^= 0xff;
+ sipround(state, 4);
+ return state->v[0] ^ state->v[1] ^ state->v[2] ^ state->v[3];
}
/**
@@ -137,12 +142,11 @@ __attribute__((optimize("-fno-strict-aliasing")))
/* cppcheck-suppress unusedFunction */
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
- uint64_t v[4] = SIPHASH_INIT(k);
-
- siphash_feed(v, *(uint64_t *)in);
+ struct siphash_state state = SIPHASH_INIT(k);
+ siphash_feed(&state, *(uint64_t *)in);
- return siphash_final(v, 8, 0);
+ return siphash_final(&state, 8, 0);
}
/**
@@ -157,12 +161,12 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
/* cppcheck-suppress unusedFunction */
uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
+ struct siphash_state state = SIPHASH_INIT(k);
uint32_t *in32 = (uint32_t *)in;
- uint64_t v[4] = SIPHASH_INIT(k);
- siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
+ siphash_feed(&state, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- return siphash_final(v, 12, *(in32 + 2));
+ return siphash_final(&state, 12, *(in32 + 2));
}
/**
@@ -176,14 +180,14 @@ uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
{
+ struct siphash_state state = SIPHASH_INIT(k);
uint32_t *in32 = (uint32_t *)in;
- uint64_t v[4] = SIPHASH_INIT(k);
int i;
for (i = 0; i < 2; i++, in32 += 2)
- siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
+ siphash_feed(&state, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- return siphash_final(v, 20, *in32);
+ return siphash_final(&state, 20, *in32);
}
/**
@@ -198,14 +202,14 @@ __attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
/* cppcheck-suppress unusedFunction */
uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
{
+ struct siphash_state state = SIPHASH_INIT(k);
uint64_t *in64 = (uint64_t *)in;
- uint64_t v[4] = SIPHASH_INIT(k);
int i;
for (i = 0; i < 4; i++, in64++)
- siphash_feed(v, *in64);
+ siphash_feed(&state, *in64);
- return siphash_final(v, 32, 0);
+ return siphash_final(&state, 32, 0);
}
/**
@@ -219,12 +223,12 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
{
+ struct siphash_state state = SIPHASH_INIT(k);
uint32_t *in32 = (uint32_t *)in;
- uint64_t v[4] = SIPHASH_INIT(k);
int i;
for (i = 0; i < 4; i++, in32 += 2)
- siphash_feed(v, (uint64_t)(*(in32 + 1)) << 32 | *in32);
+ siphash_feed(&state, (uint64_t)(*(in32 + 1)) << 32 | *in32);
- return siphash_final(v, 36, *in32);
+ return siphash_final(&state, 36, *in32);
}
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v2 08/10] siphash: Make internal helpers public
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
` (6 preceding siblings ...)
2023-09-28 1:20 ` [PATCH v2 07/10] siphash: Use specific structure for internal state David Gibson
@ 2023-09-28 1:21 ` David Gibson
2023-09-28 1:21 ` [PATCH v2 09/10] siphash, checksum: Move TBAA explanation to checksum.c David Gibson
` (2 subsequent siblings)
10 siblings, 0 replies; 12+ messages in thread
From: David Gibson @ 2023-09-28 1:21 UTC (permalink / raw)
To: Stefano Brivio, passt-dev; +Cc: David Gibson
Move a bunch of code from siphash.c to siphash.h, making it available to
other modules. This will allow places which need hashes of more complex
objects to construct them incrementally.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
siphash.c | 104 -------------------------------------------------
siphash.h | 113 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 111 insertions(+), 106 deletions(-)
diff --git a/siphash.c b/siphash.c
index 66174c7..91bcc5d 100644
--- a/siphash.c
+++ b/siphash.c
@@ -10,45 +10,6 @@
*
* Copyright (c) 2020-2021 Red Hat GmbH
* Author: Stefano Brivio <sbrivio@redhat.com>
- *
- * This is an implementation of the SipHash-2-4-64 functions needed for TCP
- * initial sequence numbers and socket lookup table hash for IPv4 and IPv6, see:
- *
- * Aumasson, J.P. and Bernstein, D.J., 2012, December. SipHash: a fast
- * short-input PRF. In International Conference on Cryptology in India
- * (pp. 489-508). Springer, Berlin, Heidelberg.
- *
- * http://cr.yp.to/siphash/siphash-20120918.pdf
- *
- * This includes code from the reference SipHash implementation at
- * https://github.com/veorq/SipHash/ originally licensed as follows:
- *
- * --
- * SipHash reference C implementation
- *
- * Copyright (c) 2012-2021 Jean-Philippe Aumasson
- * <jeanphilippe.aumasson@gmail.com>
- * Copyright (c) 2012-2014 Daniel J. Bernstein <djb@cr.yp.to>
- *
- * To the extent possible under law, the author(s) have dedicated all copyright
- * and related and neighboring rights to this software to the public domain
- * worldwide. This software is distributed without any warranty.
- *
- * You should have received a copy of the CC0 Public Domain Dedication along
- * with
- * this software. If not, see
- * <http://creativecommons.org/publicdomain/zero/1.0/>.
- * --
- *
- * and from the Linux kernel implementation (lib/siphash.c), originally licensed
- * as follows:
- *
- * --
- * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
- * --
- *
*/
#include <stddef.h>
@@ -56,71 +17,6 @@
#include "siphash.h"
-#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
-
-struct siphash_state {
- uint64_t v[4];
-};
-
-#define SIPHASH_INIT(k) { { \
- 0x736f6d6570736575ULL ^ (k)[0], \
- 0x646f72616e646f6dULL ^ (k)[1], \
- 0x6c7967656e657261ULL ^ (k)[0], \
- 0x7465646279746573ULL ^ (k)[1] \
- } }
-
-/**
- * sipround() - Perform rounds of SipHash scrambling
- * @v: siphash state (4 x 64-bit integers)
- * @n: Number of rounds to apply
- */
-static inline void sipround(struct siphash_state *state, int n)
-{
- int i;
-
- for (i = 0; i < n; i++) {
- state->v[0] += state->v[1];
- state->v[1] = ROTL(state->v[1], 13) ^ state->v[0];
- state->v[0] = ROTL(state->v[0], 32);
- state->v[2] += state->v[3];
- state->v[3] = ROTL(state->v[3], 16) ^ state->v[2];
- state->v[0] += state->v[3];
- state->v[3] = ROTL(state->v[3], 21) ^ state->v[0];
- state->v[2] += state->v[1];
- state->v[1] = ROTL(state->v[1], 17) ^ state->v[2];
- state->v[2] = ROTL(state->v[2], 32);
- }
-}
-
-/**
- * siphash_feed() - Fold 64-bits of data into the hash state
- * @v: siphash state (4 x 64-bit integers)
- * @in: New value to fold into hash
- */
-static inline void siphash_feed(struct siphash_state *state, uint64_t in)
-{
- state->v[3] ^= in;
- sipround(state, 2);
- state->v[0] ^= in;
-}
-
-/**
- * siphash_final - Finalize SipHash calculations
- * @v: siphash state (4 x 64-bit integers)
- * @len: Total length of input data
- * @tail: Final data for the hash (<= 7 bytes)
- */
-static inline uint64_t siphash_final(struct siphash_state *state,
- size_t len, uint64_t tail)
-{
- uint64_t b = (uint64_t)(len) << 56 | tail;
-
- siphash_feed(state, b);
- state->v[2] ^= 0xff;
- sipround(state, 4);
- return state->v[0] ^ state->v[1] ^ state->v[2] ^ state->v[3];
-}
-
/**
* siphash_8b() - Table index or timestamp offset for TCP over IPv4 (8 bytes in)
* @in: Input data (remote address and two ports, or two addresses)
diff --git a/siphash.h b/siphash.h
index de04c56..f966cdb 100644
--- a/siphash.h
+++ b/siphash.h
@@ -1,11 +1,120 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
- * Copyright (c) 2021 Red Hat GmbH
+ * Copyright Red Hat
* Author: Stefano Brivio <sbrivio@redhat.com>
- */
+ * Author: David Gibson <david@gibson.dropbear.id.au>
+ *
+ * This is an implementation of the SipHash-2-4-64 functions needed for TCP
+ * initial sequence numbers and socket lookup table hash for IPv4 and IPv6, see:
+ *
+ * Aumasson, J.P. and Bernstein, D.J., 2012, December. SipHash: a fast
+ * short-input PRF. In International Conference on Cryptology in India
+ * (pp. 489-508). Springer, Berlin, Heidelberg.
+ *
+ * http://cr.yp.to/siphash/siphash-20120918.pdf
+ *
+ * This includes code from the reference SipHash implementation at
+ * https://github.com/veorq/SipHash/ originally licensed as follows:
+ *
+ * --
+ * SipHash reference C implementation
+ *
+ * Copyright (c) 2012-2021 Jean-Philippe Aumasson <jeanphilippe.aumasson@gmail.com>
+ * Copyright (c) 2012-2014 Daniel J. Bernstein <djb@cr.yp.to>
+ *
+ * To the extent possible under law, the author(s) have dedicated all copyright
+ * and related and neighboring rights to this software to the public domain
+ * worldwide. This software is distributed without any warranty.
+ *
+ * You should have received a copy of the CC0 Public Domain Dedication along
+ * with this software. If not, see
+ * <http://creativecommons.org/publicdomain/zero/1.0/>.
+ * --
+ *
+ * and from the Linux kernel implementation (lib/siphash.c), originally licensed
+ * as follows:
+ *
+ * --
+ * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ * --
+ *
+*/
#ifndef SIPHASH_H
#define SIPHASH_H
+/**
+ * struct siphash_state - Internal state of siphash calculation
+ */
+struct siphash_state {
+ uint64_t v[4];
+};
+
+#define SIPHASH_INIT(k) { { \
+ 0x736f6d6570736575ULL ^ (k)[0], \
+ 0x646f72616e646f6dULL ^ (k)[1], \
+ 0x6c7967656e657261ULL ^ (k)[0], \
+ 0x7465646279746573ULL ^ (k)[1] \
+ } }
+
+/**
+ * sipround() - Perform rounds of SipHash scrambling
+ * @v: siphash state (4 x 64-bit integers)
+ * @n: Number of rounds to apply
+ */
+static inline void sipround(struct siphash_state *state, int n)
+{
+ int i;
+
+#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
+
+ for (i = 0; i < n; i++) {
+
+ state->v[0] += state->v[1];
+ state->v[1] = ROTL(state->v[1], 13) ^ state->v[0];
+ state->v[0] = ROTL(state->v[0], 32);
+ state->v[2] += state->v[3];
+ state->v[3] = ROTL(state->v[3], 16) ^ state->v[2];
+ state->v[0] += state->v[3];
+ state->v[3] = ROTL(state->v[3], 21) ^ state->v[0];
+ state->v[2] += state->v[1];
+ state->v[1] = ROTL(state->v[1], 17) ^ state->v[2];
+ state->v[2] = ROTL(state->v[2], 32);
+ }
+
+#undef ROTL
+}
+
+/**
+ * siphash_feed() - Fold 64-bits of data into the hash state
+ * @v: siphash state (4 x 64-bit integers)
+ * @in: New value to fold into hash
+ */
+static inline void siphash_feed(struct siphash_state *state, uint64_t in)
+{
+ state->v[3] ^= in;
+ sipround(state, 2);
+ state->v[0] ^= in;
+}
+
+/**
+ * siphash_final - Finalize SipHash calculations
+ * @v: siphash state (4 x 64-bit integers)
+ * @len: Total length of input data
+ * @tail: Final data for the hash (<= 7 bytes)
+ */
+static inline uint64_t siphash_final(struct siphash_state *state,
+ size_t len, uint64_t tail)
+{
+ uint64_t b = (uint64_t)(len) << 56 | tail;
+
+ siphash_feed(state, b);
+ state->v[2] ^= 0xff;
+ sipround(state, 4);
+ return state->v[0] ^ state->v[1] ^ state->v[2] ^ state->v[3];
+}
+
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k);
uint64_t siphash_12b(const uint8_t *in, const uint64_t *k);
uint64_t siphash_20b(const uint8_t *in, const uint64_t *k);
--
@@ -1,11 +1,120 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
- * Copyright (c) 2021 Red Hat GmbH
+ * Copyright Red Hat
* Author: Stefano Brivio <sbrivio@redhat.com>
- */
+ * Author: David Gibson <david@gibson.dropbear.id.au>
+ *
+ * This is an implementation of the SipHash-2-4-64 functions needed for TCP
+ * initial sequence numbers and socket lookup table hash for IPv4 and IPv6, see:
+ *
+ * Aumasson, J.P. and Bernstein, D.J., 2012, December. SipHash: a fast
+ * short-input PRF. In International Conference on Cryptology in India
+ * (pp. 489-508). Springer, Berlin, Heidelberg.
+ *
+ * http://cr.yp.to/siphash/siphash-20120918.pdf
+ *
+ * This includes code from the reference SipHash implementation at
+ * https://github.com/veorq/SipHash/ originally licensed as follows:
+ *
+ * --
+ * SipHash reference C implementation
+ *
+ * Copyright (c) 2012-2021 Jean-Philippe Aumasson <jeanphilippe.aumasson@gmail.com>
+ * Copyright (c) 2012-2014 Daniel J. Bernstein <djb@cr.yp.to>
+ *
+ * To the extent possible under law, the author(s) have dedicated all copyright
+ * and related and neighboring rights to this software to the public domain
+ * worldwide. This software is distributed without any warranty.
+ *
+ * You should have received a copy of the CC0 Public Domain Dedication along
+ * with this software. If not, see
+ * <http://creativecommons.org/publicdomain/zero/1.0/>.
+ * --
+ *
+ * and from the Linux kernel implementation (lib/siphash.c), originally licensed
+ * as follows:
+ *
+ * --
+ * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ * --
+ *
+*/
#ifndef SIPHASH_H
#define SIPHASH_H
+/**
+ * struct siphash_state - Internal state of siphash calculation
+ */
+struct siphash_state {
+ uint64_t v[4];
+};
+
+#define SIPHASH_INIT(k) { { \
+ 0x736f6d6570736575ULL ^ (k)[0], \
+ 0x646f72616e646f6dULL ^ (k)[1], \
+ 0x6c7967656e657261ULL ^ (k)[0], \
+ 0x7465646279746573ULL ^ (k)[1] \
+ } }
+
+/**
+ * sipround() - Perform rounds of SipHash scrambling
+ * @v: siphash state (4 x 64-bit integers)
+ * @n: Number of rounds to apply
+ */
+static inline void sipround(struct siphash_state *state, int n)
+{
+ int i;
+
+#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
+
+ for (i = 0; i < n; i++) {
+
+ state->v[0] += state->v[1];
+ state->v[1] = ROTL(state->v[1], 13) ^ state->v[0];
+ state->v[0] = ROTL(state->v[0], 32);
+ state->v[2] += state->v[3];
+ state->v[3] = ROTL(state->v[3], 16) ^ state->v[2];
+ state->v[0] += state->v[3];
+ state->v[3] = ROTL(state->v[3], 21) ^ state->v[0];
+ state->v[2] += state->v[1];
+ state->v[1] = ROTL(state->v[1], 17) ^ state->v[2];
+ state->v[2] = ROTL(state->v[2], 32);
+ }
+
+#undef ROTL
+}
+
+/**
+ * siphash_feed() - Fold 64-bits of data into the hash state
+ * @v: siphash state (4 x 64-bit integers)
+ * @in: New value to fold into hash
+ */
+static inline void siphash_feed(struct siphash_state *state, uint64_t in)
+{
+ state->v[3] ^= in;
+ sipround(state, 2);
+ state->v[0] ^= in;
+}
+
+/**
+ * siphash_final - Finalize SipHash calculations
+ * @v: siphash state (4 x 64-bit integers)
+ * @len: Total length of input data
+ * @tail: Final data for the hash (<= 7 bytes)
+ */
+static inline uint64_t siphash_final(struct siphash_state *state,
+ size_t len, uint64_t tail)
+{
+ uint64_t b = (uint64_t)(len) << 56 | tail;
+
+ siphash_feed(state, b);
+ state->v[2] ^= 0xff;
+ sipround(state, 4);
+ return state->v[0] ^ state->v[1] ^ state->v[2] ^ state->v[3];
+}
+
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k);
uint64_t siphash_12b(const uint8_t *in, const uint64_t *k);
uint64_t siphash_20b(const uint8_t *in, const uint64_t *k);
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v2 09/10] siphash, checksum: Move TBAA explanation to checksum.c
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
` (7 preceding siblings ...)
2023-09-28 1:21 ` [PATCH v2 08/10] siphash: Make internal helpers public David Gibson
@ 2023-09-28 1:21 ` David Gibson
2023-09-28 1:21 ` [PATCH v2 10/10] siphash: Use incremental rather than all-at-once siphash functions David Gibson
2023-09-30 10:43 ` [PATCH v2 00/10] siphash: cleanups and fixes Stefano Brivio
10 siblings, 0 replies; 12+ messages in thread
From: David Gibson @ 2023-09-28 1:21 UTC (permalink / raw)
To: Stefano Brivio, passt-dev; +Cc: David Gibson
A number of checksum and hash functions require workarounds for the odd
behaviour of Type-Based Alias Analysis. We have a detailed comment about
this on siphash_8b() and other functions reference that.
Move the main comment to csum_16b() instead, because we're going to
reorganise things in siphash.c.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
checksum.c | 19 ++++++++++++++-----
siphash.c | 19 +++++--------------
2 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/checksum.c b/checksum.c
index f2b82a4..03b8a7c 100644
--- a/checksum.c
+++ b/checksum.c
@@ -69,8 +69,17 @@
*
* Return: 32-bit sum of 16-bit words
*/
+/* Type-Based Alias Analysis (TBAA) optimisation in gcc 11 and 12 (-flto -O2)
+ * makes these functions essentially useless by allowing reordering of stores of
+ * input data across function calls. Not even declaring @in as char pointer is
+ * enough: disable gcc's interpretation of strict aliasing altogether. See also:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106706
+ * https://stackoverflow.com/questions/2958633/gcc-strict-aliasing-and-horror-stories
+ * https://lore.kernel.org/all/alpine.LFD.2.00.0901121128080.6528__33422.5328093909$1232291247$gmane$org@localhost.localdomain/
+ */
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing")))
uint32_t sum_16b(const void *buf, size_t len)
{
const uint16_t *p = buf;
@@ -110,7 +119,7 @@ uint16_t csum_fold(uint32_t sum)
* Return: 16-bit IPv4-style checksum
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
uint16_t csum_unaligned(const void *buf, size_t len, uint32_t init)
{
return (uint16_t)~csum_fold(sum_16b(buf, len) + init);
@@ -247,7 +256,7 @@ void csum_icmp6(struct icmp6hdr *icmp6hr,
* - coding style adaptation
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
static uint32_t csum_avx2(const void *buf, size_t len, uint32_t init)
{
__m256i a, b, sum256, sum_a_hi, sum_a_lo, sum_b_hi, sum_b_lo, c, d;
@@ -395,7 +404,7 @@ less_than_128_bytes:
* Return: 16-bit folded, complemented checksum sum
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
uint16_t csum(const void *buf, size_t len, uint32_t init)
{
return (uint16_t)~csum_fold(csum_avx2(buf, len, init));
@@ -412,7 +421,7 @@ uint16_t csum(const void *buf, size_t len, uint32_t init)
* Return: 16-bit folded, complemented checksum
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
uint16_t csum(const void *buf, size_t len, uint32_t init)
{
return csum_unaligned(buf, len, init);
diff --git a/siphash.c b/siphash.c
index 91bcc5d..d2b068c 100644
--- a/siphash.c
+++ b/siphash.c
@@ -24,17 +24,8 @@
*
* Return: the 64-bit hash output
*/
-/* Type-Based Alias Analysis (TBAA) optimisation in gcc 11 and 12 (-flto -O2)
- * makes these functions essentially useless by allowing reordering of stores of
- * input data across function calls. Not even declaring @in as char pointer is
- * enough: disable gcc's interpretation of strict aliasing altogether. See also:
- *
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106706
- * https://stackoverflow.com/questions/2958633/gcc-strict-aliasing-and-horror-stories
- * https://lore.kernel.org/all/alpine.LFD.2.00.0901121128080.6528__33422.5328093909$1232291247$gmane$org@localhost.localdomain/
- */
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing")))
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
/* cppcheck-suppress unusedFunction */
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
@@ -53,7 +44,7 @@ uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
* Return: the 64-bit hash output
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
/* cppcheck-suppress unusedFunction */
uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
@@ -73,7 +64,7 @@ uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
* Return: the 64-bit hash output
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
{
struct siphash_state state = SIPHASH_INIT(k);
@@ -94,7 +85,7 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
* Return: the 64-bit hash output
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
/* cppcheck-suppress unusedFunction */
uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
{
@@ -116,7 +107,7 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
* Return: the 64-bit hash output
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
{
struct siphash_state state = SIPHASH_INIT(k);
--
@@ -24,17 +24,8 @@
*
* Return: the 64-bit hash output
*/
-/* Type-Based Alias Analysis (TBAA) optimisation in gcc 11 and 12 (-flto -O2)
- * makes these functions essentially useless by allowing reordering of stores of
- * input data across function calls. Not even declaring @in as char pointer is
- * enough: disable gcc's interpretation of strict aliasing altogether. See also:
- *
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106706
- * https://stackoverflow.com/questions/2958633/gcc-strict-aliasing-and-horror-stories
- * https://lore.kernel.org/all/alpine.LFD.2.00.0901121128080.6528__33422.5328093909$1232291247$gmane$org@localhost.localdomain/
- */
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing")))
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
/* cppcheck-suppress unusedFunction */
uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
{
@@ -53,7 +44,7 @@ uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
* Return: the 64-bit hash output
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
/* cppcheck-suppress unusedFunction */
uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
{
@@ -73,7 +64,7 @@ uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
* Return: the 64-bit hash output
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
{
struct siphash_state state = SIPHASH_INIT(k);
@@ -94,7 +85,7 @@ uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
* Return: the 64-bit hash output
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
/* cppcheck-suppress unusedFunction */
uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
{
@@ -116,7 +107,7 @@ uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
* Return: the 64-bit hash output
*/
/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See siphash_8b() */
+__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
{
struct siphash_state state = SIPHASH_INIT(k);
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v2 10/10] siphash: Use incremental rather than all-at-once siphash functions
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
` (8 preceding siblings ...)
2023-09-28 1:21 ` [PATCH v2 09/10] siphash, checksum: Move TBAA explanation to checksum.c David Gibson
@ 2023-09-28 1:21 ` David Gibson
2023-09-30 10:43 ` [PATCH v2 00/10] siphash: cleanups and fixes Stefano Brivio
10 siblings, 0 replies; 12+ messages in thread
From: David Gibson @ 2023-09-28 1:21 UTC (permalink / raw)
To: Stefano Brivio, passt-dev; +Cc: David Gibson
We have a bunch of variants of the siphash functions for different data
sizes. The callers, in tcp.c, need to pack the various values they want to
hash into a temporary structure, then call the appropriate version. We can
avoid the copy into the temporary by directly using the incremental
siphash functions.
The length specific hash functions also have an undocumented constraint
that the data pointer they take must, in fact, be aligned to avoid
unaligned accesses, which may cause crashes on some architectures.
So, prefer the incremental approach and remove the length-specific
functions.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
Makefile | 2 +-
inany.h | 17 +++++++-
siphash.c | 121 ---------------------------------------------------
tcp.c | 32 +++++---------
tcp_splice.c | 1 +
5 files changed, 28 insertions(+), 145 deletions(-)
delete mode 100644 siphash.c
diff --git a/Makefile b/Makefile
index c28556f..af3bc75 100644
--- a/Makefile
+++ b/Makefile
@@ -45,7 +45,7 @@ FLAGS += -DDUAL_STACK_SOCKETS=$(DUAL_STACK_SOCKETS)
PASST_SRCS = arch.c arp.c checksum.c conf.c dhcp.c dhcpv6.c icmp.c igmp.c \
isolation.c lineread.c log.c mld.c ndp.c netlink.c packet.c passt.c \
- pasta.c pcap.c siphash.c tap.c tcp.c tcp_splice.c udp.c util.c
+ pasta.c pcap.c tap.c tcp.c tcp_splice.c udp.c util.c
QRAP_SRCS = qrap.c
SRCS = $(PASST_SRCS) $(QRAP_SRCS)
diff --git a/inany.h b/inany.h
index aadb20b..85a18e3 100644
--- a/inany.h
+++ b/inany.h
@@ -14,8 +14,9 @@
* @v4mapped.zero: All zero-bits for an IPv4 address
* @v4mapped.one: All one-bits for an IPv4 address
* @v4mapped.a4: If @a6 is an IPv4 mapped address, the IPv4 address
+ * @u32: As an array of u32s (solely for hashing)
*
- * @v4mapped shouldn't be accessed except via helpers.
+ * @v4mapped and @u32 shouldn't be accessed except via helpers.
*/
union inany_addr {
struct in6_addr a6;
@@ -24,7 +25,10 @@ union inany_addr {
uint8_t one[2];
struct in_addr a4;
} v4mapped;
+ uint32_t u32[4];
};
+static_assert(sizeof(union inany_addr) == sizeof(struct in6_addr));
+static_assert(_Alignof(union inany_addr) == _Alignof(uint32_t));
/** inany_v4 - Extract IPv4 address, if present, from IPv[46] address
* @addr: IPv4 or IPv6 address
@@ -94,4 +98,15 @@ static inline void inany_from_sockaddr(union inany_addr *aa, in_port_t *port,
}
}
+/** inany_siphash_feed() - Fold IPv[46] address into an in-progress siphash
+ * @state: siphash state
+ * @aa: inany to hash
+ */
+static inline void inany_siphash_feed(struct siphash_state *state,
+ const union inany_addr *aa)
+{
+ siphash_feed(state, (uint64_t)aa->u32[0] << 32 | aa->u32[1]);
+ siphash_feed(state, (uint64_t)aa->u32[2] << 32 | aa->u32[3]);
+}
+
#endif /* INANY_H */
diff --git a/siphash.c b/siphash.c
deleted file mode 100644
index d2b068c..0000000
--- a/siphash.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-/* PASST - Plug A Simple Socket Transport
- * for qemu/UNIX domain socket mode
- *
- * PASTA - Pack A Subtle Tap Abstraction
- * for network namespace/tap device mode
- *
- * siphash.c - SipHash routines
- *
- * Copyright (c) 2020-2021 Red Hat GmbH
- * Author: Stefano Brivio <sbrivio@redhat.com>
- */
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "siphash.h"
-
-/**
- * siphash_8b() - Table index or timestamp offset for TCP over IPv4 (8 bytes in)
- * @in: Input data (remote address and two ports, or two addresses)
- * @k: Hash function key, 128 bits
- *
- * Return: the 64-bit hash output
- */
-/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
-/* cppcheck-suppress unusedFunction */
-uint64_t siphash_8b(const uint8_t *in, const uint64_t *k)
-{
- struct siphash_state state = SIPHASH_INIT(k);
-
- siphash_feed(&state, *(uint64_t *)in);
-
- return siphash_final(&state, 8, 0);
-}
-
-/**
- * siphash_12b() - Initial sequence number for TCP over IPv4 (12 bytes in)
- * @in: Input data (two addresses, two ports)
- * @k: Hash function key, 128 bits
- *
- * Return: the 64-bit hash output
- */
-/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
-/* cppcheck-suppress unusedFunction */
-uint64_t siphash_12b(const uint8_t *in, const uint64_t *k)
-{
- struct siphash_state state = SIPHASH_INIT(k);
- uint32_t *in32 = (uint32_t *)in;
-
- siphash_feed(&state, (uint64_t)(*(in32 + 1)) << 32 | *in32);
-
- return siphash_final(&state, 12, *(in32 + 2));
-}
-
-/**
- * siphash_20b() - Table index for TCP over IPv6 (20 bytes in)
- * @in: Input data (remote address, two ports)
- * @k: Hash function key, 128 bits
- *
- * Return: the 64-bit hash output
- */
-/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
-uint64_t siphash_20b(const uint8_t *in, const uint64_t *k)
-{
- struct siphash_state state = SIPHASH_INIT(k);
- uint32_t *in32 = (uint32_t *)in;
- int i;
-
- for (i = 0; i < 2; i++, in32 += 2)
- siphash_feed(&state, (uint64_t)(*(in32 + 1)) << 32 | *in32);
-
- return siphash_final(&state, 20, *in32);
-}
-
-/**
- * siphash_32b() - Timestamp offset for TCP over IPv6 (32 bytes in)
- * @in: Input data (two addresses)
- * @k: Hash function key, 128 bits
- *
- * Return: the 64-bit hash output
- */
-/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
-/* cppcheck-suppress unusedFunction */
-uint64_t siphash_32b(const uint8_t *in, const uint64_t *k)
-{
- struct siphash_state state = SIPHASH_INIT(k);
- uint64_t *in64 = (uint64_t *)in;
- int i;
-
- for (i = 0; i < 4; i++, in64++)
- siphash_feed(&state, *in64);
-
- return siphash_final(&state, 32, 0);
-}
-
-/**
- * siphash_36b() - Initial sequence number for TCP over IPv6 (36 bytes in)
- * @in: Input data (two addresses, two ports)
- * @k: Hash function key, 128 bits
- *
- * Return: the 64-bit hash output
- */
-/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
-__attribute__((optimize("-fno-strict-aliasing"))) /* See csum_16b() */
-uint64_t siphash_36b(const uint8_t *in, const uint64_t *k)
-{
- struct siphash_state state = SIPHASH_INIT(k);
- uint32_t *in32 = (uint32_t *)in;
- int i;
-
- for (i = 0; i < 4; i++, in32 += 2)
- siphash_feed(&state, (uint64_t)(*(in32 + 1)) << 32 | *in32);
-
- return siphash_final(&state, 36, *in32);
-}
diff --git a/tcp.c b/tcp.c
index 19baba5..680874f 100644
--- a/tcp.c
+++ b/tcp.c
@@ -1165,18 +1165,13 @@ static int tcp_hash_match(const struct tcp_tap_conn *conn,
static unsigned int tcp_hash(const struct ctx *c, const union inany_addr *faddr,
in_port_t eport, in_port_t fport)
{
- struct {
- union inany_addr faddr;
- in_port_t eport;
- in_port_t fport;
- } __attribute__((__packed__)) in = {
- *faddr, eport, fport
- };
- uint64_t b = 0;
+ struct siphash_state state = SIPHASH_INIT(c->tcp.hash_secret);
+ uint64_t hash;
- b = siphash_20b((uint8_t *)&in, c->tcp.hash_secret);
+ inany_siphash_feed(&state, faddr);
+ hash = siphash_final(&state, 20, (uint64_t)eport << 16 | fport);
- return (unsigned int)(b % TCP_HASH_TABLE_SIZE);
+ return (unsigned int)(hash % TCP_HASH_TABLE_SIZE);
}
/**
@@ -1815,17 +1810,8 @@ static void tcp_clamp_window(const struct ctx *c, struct tcp_tap_conn *conn,
static void tcp_seq_init(const struct ctx *c, struct tcp_tap_conn *conn,
const struct timespec *now)
{
+ struct siphash_state state = SIPHASH_INIT(c->tcp.hash_secret);
union inany_addr aany;
- struct {
- union inany_addr src;
- in_port_t srcport;
- union inany_addr dst;
- in_port_t dstport;
- } __attribute__((__packed__)) in = {
- .src = conn->faddr,
- .srcport = conn->fport,
- .dstport = conn->eport,
- };
uint64_t hash;
uint32_t ns;
@@ -1833,9 +1819,11 @@ static void tcp_seq_init(const struct ctx *c, struct tcp_tap_conn *conn,
inany_from_af(&aany, AF_INET, &c->ip4.addr);
else
inany_from_af(&aany, AF_INET6, &c->ip6.addr);
- in.dst = aany;
- hash = siphash_36b((uint8_t *)&in, c->tcp.hash_secret);
+ inany_siphash_feed(&state, &conn->faddr);
+ inany_siphash_feed(&state, &aany);
+ hash = siphash_final(&state, 36,
+ (uint64_t)conn->fport << 16 | conn->eport);
/* 32ns ticks, overflows 32 bits every 137s */
ns = (now->tv_sec * 1000000000 + now->tv_nsec) >> 5;
diff --git a/tcp_splice.c b/tcp_splice.c
index be01908..a572aca 100644
--- a/tcp_splice.c
+++ b/tcp_splice.c
@@ -52,6 +52,7 @@
#include "passt.h"
#include "log.h"
#include "tcp_splice.h"
+#include "siphash.h"
#include "inany.h"
#include "tcp_conn.h"
--
2.41.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH v2 00/10] siphash: cleanups and fixes
2023-09-28 1:20 [PATCH v2 00/10] siphash: cleanups and fixes David Gibson
` (9 preceding siblings ...)
2023-09-28 1:21 ` [PATCH v2 10/10] siphash: Use incremental rather than all-at-once siphash functions David Gibson
@ 2023-09-30 10:43 ` Stefano Brivio
10 siblings, 0 replies; 12+ messages in thread
From: Stefano Brivio @ 2023-09-30 10:43 UTC (permalink / raw)
To: David Gibson; +Cc: passt-dev
On Thu, 28 Sep 2023 11:20:52 +1000
David Gibson <david@gibson.dropbear.id.au> wrote:
> While working on unifying the hashing for the flow table, I noticed
> some awkwardness in the siphash functions. While looking into that I
> noticed some bugs. So.. here we are.
>
> Changes since v1:
> * Don't accidentally increase the alignment of union inany_addr
>
> David Gibson (10):
> siphash: Make siphash functions consistently return 64-bit results
> siphash: Make sip round calculations an inline function rather than
> macro
> siphash: Add siphash_feed() helper
> siphash: Clean up hash finalisation with posthash_final() function
> siphash: Fix bug in state initialisation
> siphash: Use more hygienic state initialiser
> siphash: Use specific structure for internal state
> siphash: Make internal helpers public
> siphash, checksum: Move TBAA explanation to checksum.c
> siphash: Use incremental rather than all-at-once siphash functions
Applied, thanks.
--
Stefano
^ permalink raw reply [flat|nested] 12+ messages in thread