public inbox for passt-dev@passt.top
* checksum: add VSX fast path for POWER8/POWER9
@ 2026-02-05  6:14 jfiusdq
  2026-02-05 18:03 ` Stefano Brivio
  2026-02-06 15:17 ` Laurent Vivier
  0 siblings, 2 replies; 6+ messages in thread
From: jfiusdq @ 2026-02-05  6:14 UTC (permalink / raw)
  To: passt-dev

[-- Attachment #1: Type: text/plain, Size: 152 bytes --]

Tested with podman on Debian 13 for a while; it works fine. It's difficult to run the full test suite on POWER, but the 505-networking-pasta.bats test suite passes.
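
The idea behind the inner loops: each 16-byte load is treated as eight
16-bit lanes, and multiplying by a vector of ones with vec_mule() /
vec_mulo() widens the even and odd lanes into two vectors of four 32-bit
partial sums. Splitting the lanes this way only partitions the sum, so the
accumulated value matches the 16-bit-word sum the scalar code computes (the
32-bit lanes could only wrap for buffers far larger than any packet), and
the epilogue folds it back down to 32 bits. For reference, a minimal scalar
sketch of that computation (a hypothetical helper, not part of the patch,
assuming a little-endian host such as ppc64le):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/**
 * csum_scalar_ref() - Illustrative scalar equivalent of csum_vsx()
 * @buf:	Input buffer
 * @len:	Input length
 * @init:	Initial 32-bit checksum, 0 for no pre-computed checksum
 *
 * Return: 32-bit checksum, not complemented, not folded below 32 bits
 */
static uint32_t csum_scalar_ref(const void *buf, size_t len, uint32_t init)
{
	const uint8_t *p = buf;
	uint64_t sum64 = init;

	while (len >= 2) {
		uint16_t w;

		memcpy(&w, p, sizeof(w));	/* native byte order, like the vector loads */
		sum64 += w;
		p += 2;
		len -= 2;
	}

	if (len)				/* odd trailing byte */
		sum64 += *p;

	/* Same fold as the patch epilogue: 64 -> 32 bits with end-around carry */
	sum64 = (sum64 >> 32) + (sum64 & 0xffffffff);
	sum64 += sum64 >> 32;

	return (uint32_t)sum64;
}

In the patch itself the remaining tail bytes are handed to sum_16b(), which
keeps the behaviour of the generic path for the last < 16 bytes.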

[-- Attachment #2: 0001-checksum-add-VSX-fast-path-for-POWER8-POWER9.patch --]
[-- Type: text/x-patch, Size: 4506 bytes --]

From b431f05c171a1c9b63ca8a57001188f782354a6a Mon Sep 17 00:00:00 2001
From: user <user@localhost>
Date: Thu, 5 Feb 2026 06:36:27 +0100
Subject: [PATCH] checksum: add VSX fast path for POWER8/POWER9

---
 checksum.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 107 insertions(+), 3 deletions(-)

diff --git a/checksum.c b/checksum.c
index 0c3837c..828f9ec 100644
--- a/checksum.c
+++ b/checksum.c
@@ -281,7 +281,7 @@ void csum_icmp6(struct icmp6hdr *icmp6hr,
 	icmp6hr->icmp6_cksum = csum(payload, dlen, psum);
 }
 
-#ifdef __AVX2__
+#if defined(__AVX2__)
 #include <immintrin.h>
 
 /**
@@ -479,7 +479,111 @@ uint32_t csum_unfolded(const void *buf, size_t len, uint32_t init)
 
 	return init;
 }
-#else /* __AVX2__ */
+#elif defined(__POWER9_VECTOR__) || defined(__POWER8_VECTOR__)
+#include <altivec.h>
+
+/**
+ * csum_vsx() - Compute 32-bit checksum using VSX SIMD instructions
+ * @buf:	Input buffer
+ * @len:	Input length
+ * @init:	Initial 32-bit checksum, 0 for no pre-computed checksum
+ *
+ * Return: 32-bit checksum, not complemented, not folded
+ */
+/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
+__attribute__((optimize("-fno-strict-aliasing")))	/* See csum_16b() */
+static uint32_t csum_vsx(const void *buf, size_t len, uint32_t init)
+{
+	const uint8_t *p = buf;
+	vector unsigned int sum_even = vec_splat_u32(0);
+	vector unsigned int sum_odd = vec_splat_u32(0);
+	const vector unsigned short ones = vec_splat_u16(1);
+	uint64_t sum64 = init;
+
+#ifdef __POWER9_VECTOR__
+	while (len >= 64) {
+		vector unsigned char v0b = vec_vsx_ld(0, p);
+		vector unsigned char v1b = vec_vsx_ld(16, p);
+		vector unsigned char v2b = vec_vsx_ld(32, p);
+		vector unsigned char v3b = vec_vsx_ld(48, p);
+		vector unsigned short v0 = (vector unsigned short)v0b;
+		vector unsigned short v1 = (vector unsigned short)v1b;
+		vector unsigned short v2 = (vector unsigned short)v2b;
+		vector unsigned short v3 = (vector unsigned short)v3b;
+
+		sum_even = vec_add(sum_even, vec_mule(v0, ones));
+		sum_odd = vec_add(sum_odd, vec_mulo(v0, ones));
+		sum_even = vec_add(sum_even, vec_mule(v1, ones));
+		sum_odd = vec_add(sum_odd, vec_mulo(v1, ones));
+		sum_even = vec_add(sum_even, vec_mule(v2, ones));
+		sum_odd = vec_add(sum_odd, vec_mulo(v2, ones));
+		sum_even = vec_add(sum_even, vec_mule(v3, ones));
+		sum_odd = vec_add(sum_odd, vec_mulo(v3, ones));
+
+		p += 64;
+		len -= 64;
+	}
+#endif
+
+	while (len >= 32) {
+		vector unsigned char v0b = vec_vsx_ld(0, p);
+		vector unsigned char v1b = vec_vsx_ld(16, p);
+		vector unsigned short v0 = (vector unsigned short)v0b;
+		vector unsigned short v1 = (vector unsigned short)v1b;
+
+		sum_even = vec_add(sum_even, vec_mule(v0, ones));
+		sum_odd = vec_add(sum_odd, vec_mulo(v0, ones));
+		sum_even = vec_add(sum_even, vec_mule(v1, ones));
+		sum_odd = vec_add(sum_odd, vec_mulo(v1, ones));
+
+		p += 32;
+		len -= 32;
+	}
+
+	while (len >= 16) {
+		vector unsigned char v0b = vec_vsx_ld(0, p);
+		vector unsigned short v0 = (vector unsigned short)v0b;
+
+		sum_even = vec_add(sum_even, vec_mule(v0, ones));
+		sum_odd = vec_add(sum_odd, vec_mulo(v0, ones));
+
+		p += 16;
+		len -= 16;
+	}
+
+	{
+		vector unsigned int sum32 = vec_add(sum_even, sum_odd);
+		uint32_t partial[4] __attribute__((aligned(16)));
+
+		vec_st(sum32, 0, partial);
+		sum64 += (uint64_t)partial[0] + partial[1] +
+			 partial[2] + partial[3];
+	}
+
+	sum64 += sum_16b(p, len);
+
+	sum64 = (sum64 >> 32) + (sum64 & 0xffffffff);
+	sum64 += sum64 >> 32;
+
+	return (uint32_t)sum64;
+}
+
+/**
+ * csum_unfolded() - Calculate the unfolded checksum of a data buffer.
+ *
+ * @buf:   Input buffer
+ * @len:   Input length
+ * @init:  Initial 32-bit checksum, 0 for no pre-computed checksum
+ *
+ * Return: 32-bit unfolded checksum
+ */
+/* NOLINTNEXTLINE(clang-diagnostic-unknown-attributes) */
+__attribute__((optimize("-fno-strict-aliasing")))	/* See csum_16b() */
+uint32_t csum_unfolded(const void *buf, size_t len, uint32_t init)
+{
+	return csum_vsx(buf, len, init);
+}
+#else /* !__AVX2__ && !__POWER9_VECTOR__ && !__POWER8_VECTOR__ */
 /**
  * csum_unfolded() - Calculate the unfolded checksum of a data buffer.
  *
@@ -495,7 +599,7 @@ uint32_t csum_unfolded(const void *buf, size_t len, uint32_t init)
 {
 	return sum_16b(buf, len) + init;
 }
-#endif /* !__AVX2__ */
+#endif /* !__AVX2__ && !__POWER9_VECTOR__ && !__POWER8_VECTOR__ */
 
 /**
  * csum_iov_tail() - Calculate unfolded checksum for the tail of an IO vector
-- 
2.52.0
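
Since running the full test suite on POWER is awkward, one quick sanity
check is to compare the new path against a scalar reference over many
lengths. A minimal, hypothetical harness (assuming it is linked against
checksum.c from the passt tree and against the csum_scalar_ref() sketch
above, made non-static; buffer size and seed are arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

uint32_t csum_unfolded(const void *buf, size_t len, uint32_t init);	/* checksum.c */
uint32_t csum_scalar_ref(const void *buf, size_t len, uint32_t init);	/* sketch above */

/* Fold a 32-bit unfolded checksum down to the 16-bit Internet checksum */
static uint16_t fold16(uint32_t sum)
{
	sum = (sum >> 16) + (sum & 0xffff);
	sum += sum >> 16;
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t buf[2048];
	size_t len, i;

	srand(1);
	for (len = 0; len <= sizeof(buf); len++) {
		for (i = 0; i < len; i++)
			buf[i] = rand() & 0xff;

		if (fold16(csum_unfolded(buf, len, 0)) !=
		    fold16(csum_scalar_ref(buf, len, 0))) {
			fprintf(stderr, "mismatch at len %zu\n", len);
			return 1;
		}
	}
	printf("ok\n");
	return 0;
}

Comparing the folded 16-bit values rather than the raw 32-bit returns
avoids false mismatches: different code paths may legitimately return
different unfolded sums that fold to the same checksum.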



end of thread

Thread overview: 6+ messages
2026-02-05  6:14 checksum: add VSX fast path for POWER8/POWER9 jfiusdq
2026-02-05 18:03 ` Stefano Brivio
2026-02-06 15:17 ` Laurent Vivier
2026-02-07 22:31   ` jfiusdq
2026-02-10 13:36     ` Cédric Le Goater
2026-02-11 11:55       ` jfiusdq

Code repositories for project(s) associated with this public inbox

	https://passt.top/passt
