From: David Gibson
To: passt-dev@passt.top, Stefano Brivio
Cc: David Gibson
Subject: [PATCH 2/3] tcp: Implement hash table with indices rather than pointers
Date: Mon, 4 Dec 2023 14:16:10 +1100
Message-ID: <20231204031611.3566791-3-david@gibson.dropbear.id.au>
In-Reply-To: <20231204031611.3566791-1-david@gibson.dropbear.id.au>
References: <20231204031611.3566791-1-david@gibson.dropbear.id.au>

We implement our hash table with pointers to the entry for each bucket (or
NULL). However, the entries are always allocated within the flow table,
meaning that a flow index will suffice, halving the size of the hash table.
For TCP, just a flow index would be enough, but future uses will want to
expand the hash table to cover indexing either side of a flow, so use a
flow_sidx_t as the type for each hash bucket.
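As an illustration of the size argument above, here is a minimal standalone
sketch, not taken from flow.h: a toy (flow, side) bucket value with a reserved
"none" index occupies half the space of a pointer bucket on typical 64-bit
hosts. All names and field widths below are invented for this example.

/* Toy stand-in for a (flow index, side) bucket value; the real
 * flow_sidx_t in flow.h is defined independently of this sketch. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_FLOW_MAX	(1U << 20)	/* invented capacity for the example */

typedef struct toy_sidx {
	unsigned	side : 1;	/* which side of the flow */
	unsigned	flow : 31;	/* index into the flow table */
} toy_sidx;

/* An out-of-range flow index marks an empty bucket, replacing NULL */
#define TOY_SIDX_NONE	((toy_sidx){ .flow = TOY_FLOW_MAX })

static bool toy_sidx_eq(toy_sidx a, toy_sidx b)
{
	return a.flow == b.flow && a.side == b.side;
}

int main(void)
{
	toy_sidx bucket = TOY_SIDX_NONE;

	/* Typically prints 8 vs 4: the index bucket is half the size */
	printf("pointer bucket: %zu bytes, index bucket: %zu bytes\n",
	       sizeof(void *), sizeof(toy_sidx));
	printf("bucket empty: %s\n",
	       toy_sidx_eq(bucket, TOY_SIDX_NONE) ? "yes" : "no");
	return 0;
}

With a reserved index standing in for NULL, an empty bucket is detected with
the flow_sidx_eq() helper added by this patch rather than a pointer check.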
Signed-off-by: David Gibson
---
 flow.h | 11 +++++++++++
 tcp.c  | 34 +++++++++++++++++++++++-----------
 2 files changed, 34 insertions(+), 11 deletions(-)

diff --git a/flow.h b/flow.h
index c2a5190..959b461 100644
--- a/flow.h
+++ b/flow.h
@@ -53,6 +53,17 @@ static_assert(sizeof(flow_sidx_t) <= sizeof(uint32_t),
 
 #define FLOW_SIDX_NONE ((flow_sidx_t){ .flow = FLOW_MAX })
 
+/**
+ * flow_sidx_eq() - Test if two sidx values are equal
+ * @a, @b:	sidx values
+ *
+ * Return: true iff @a and @b refer to the same side of the same flow
+ */
+static inline bool flow_sidx_eq(flow_sidx_t a, flow_sidx_t b)
+{
+	return (a.flow == b.flow) && (a.side == b.side);
+}
+
 union flow;
 
 void flow_table_compact(struct ctx *c, union flow *hole);
diff --git a/tcp.c b/tcp.c
index 09acf7f..7e438b7 100644
--- a/tcp.c
+++ b/tcp.c
@@ -574,7 +574,7 @@ static unsigned int tcp6_l2_flags_buf_used;
 #define CONN(idx)		(&(FLOW(idx)->tcp))
 
 /* Table for lookup from remote address, local port, remote port */
-static struct tcp_tap_conn *tc_hash[TCP_HASH_TABLE_SIZE];
+static flow_sidx_t tc_hash[TCP_HASH_TABLE_SIZE];
 
 static_assert(ARRAY_SIZE(tc_hash) >= FLOW_MAX,
	      "Safe linear probing requires hash table larger than connection table");
@@ -1197,10 +1197,13 @@ static unsigned int tcp_conn_hash(const struct ctx *c,
 static inline unsigned tcp_hash_probe(const struct ctx *c,
				      const struct tcp_tap_conn *conn)
 {
+	flow_sidx_t sidx = FLOW_SIDX(conn, TAPSIDE);
 	unsigned b;
 
 	/* Linear probing */
-	for (b = tcp_conn_hash(c, conn); tc_hash[b] && tc_hash[b] != conn;
+	for (b = tcp_conn_hash(c, conn);
+	     !flow_sidx_eq(tc_hash[b], FLOW_SIDX_NONE) &&
+	     !flow_sidx_eq(tc_hash[b], sidx);
 	     b = (b + 1) % TCP_HASH_TABLE_SIZE)
 		;
 
@@ -1216,7 +1219,7 @@ static void tcp_hash_insert(const struct ctx *c, struct tcp_tap_conn *conn)
 {
 	unsigned b = tcp_hash_probe(c, conn);
 
-	tc_hash[b] = conn;
+	tc_hash[b] = FLOW_SIDX(conn, TAPSIDE);
 	flow_dbg(conn, "hash table insert: sock %i, bucket: %u", conn->sock, b);
 }
 
@@ -1229,16 +1232,18 @@ static void tcp_hash_remove(const struct ctx *c,
			    const struct tcp_tap_conn *conn)
 {
 	unsigned b = tcp_hash_probe(c, conn), s;
+	union flow *flow = flow_at_sidx(tc_hash[b]);
 
-	if (!tc_hash[b])
+	if (!flow)
 		return; /* Redundant remove */
 
 	flow_dbg(conn, "hash table remove: sock %i, bucket: %u", conn->sock, b);
 
 	/* Scan the remainder of the cluster */
-	for (s = (b + 1) % TCP_HASH_TABLE_SIZE; tc_hash[s];
+	for (s = (b + 1) % TCP_HASH_TABLE_SIZE;
+	     (flow = flow_at_sidx(tc_hash[s]));
 	     s = (s + 1) % TCP_HASH_TABLE_SIZE) {
-		unsigned h = tcp_conn_hash(c, tc_hash[s]);
+		unsigned h = tcp_conn_hash(c, &flow->tcp);
 
 		if (in_mod_range(h, b, s, TCP_HASH_TABLE_SIZE)) {
 			/* tc_hash[s] can live in tc_hash[b]'s slot */
@@ -1248,7 +1253,7 @@ static void tcp_hash_remove(const struct ctx *c,
 		}
 	}
 
-	tc_hash[b] = NULL;
+	tc_hash[b] = FLOW_SIDX_NONE;
 }
 
 /**
@@ -1263,10 +1268,10 @@ void tcp_tap_conn_update(const struct ctx *c, struct tcp_tap_conn *old,
 {
 	unsigned b = tcp_hash_probe(c, old);
 
-	if (!tc_hash[b])
+	if (!flow_at_sidx(tc_hash[b]))
 		return; /* Not in hash table, nothing to update */
 
-	tc_hash[b] = new;
+	tc_hash[b] = FLOW_SIDX(new, TAPSIDE);
 
 	debug("TCP: hash table update: old index %u, new index %u, sock %i, "
	      "bucket: %u", FLOW_IDX(old), FLOW_IDX(new), new->sock, b);
@@ -1289,16 +1294,18 @@ static struct tcp_tap_conn *tcp_hash_lookup(const struct ctx *c,
					    in_port_t eport, in_port_t fport)
 {
 	union inany_addr aany;
+	union flow *flow;
 	unsigned b;
 
 	inany_from_af(&aany, af, faddr);
 
 	for (b = tcp_hash(c, &aany, eport, fport);
-	     tc_hash[b] && !tcp_hash_match(tc_hash[b], &aany, eport, fport);
+	     (flow = flow_at_sidx(tc_hash[b]))
+	     && !tcp_hash_match(&flow->tcp, &aany, eport, fport);
 	     b = (b + 1) % TCP_HASH_TABLE_SIZE)
 		;
 
-	return tc_hash[b];
+	return &flow->tcp;
 }
 
 /**
@@ -3090,6 +3097,11 @@ static void tcp_sock_refill_init(const struct ctx *c)
  */
 int tcp_init(struct ctx *c)
 {
+	unsigned b;
+
+	for (b = 0; b < TCP_HASH_TABLE_SIZE; b++)
+		tc_hash[b] = FLOW_SIDX_NONE;
+
 	if (c->ifi4)
 		tcp_sock4_iov_init(c);
 
-- 
2.43.0
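For readers following the probing logic above, here is a standalone sketch
(not the passt code) of the open-addressing scheme the patch keeps intact:
linear probing for insert and lookup, plus a removal that re-compacts the
remainder of the probe cluster so no later lookup stops early at the vacated
slot. HSIZE, EMPTY, hash_of(), toy_insert(), toy_remove() and
cyclic_between() are all invented for this example; in particular,
cyclic_between() is my own stand-in for the role the diff's in_mod_range()
check plays, whose real definition is not shown here.

/* Toy open-addressing table with linear probing and compacting removal.
 * Keys are nonzero unsigned ints; 0 marks an empty slot.  The table is
 * assumed never to fill up (mirroring the static_assert in the diff). */
#include <stdbool.h>
#include <stdio.h>

#define HSIZE	8U
#define EMPTY	0U			/* key 0 is reserved as the sentinel */

static unsigned htable[HSIZE];

static unsigned hash_of(unsigned key)
{
	return key % HSIZE;		/* deliberately weak: forces clusters */
}

static void toy_insert(unsigned key)
{
	unsigned b;

	for (b = hash_of(key); htable[b] != EMPTY; b = (b + 1) % HSIZE)
		;			/* linear probing */
	htable[b] = key;
}

/* True iff h lies cyclically in the half-open interval (b, s] */
static bool cyclic_between(unsigned h, unsigned b, unsigned s)
{
	unsigned dh = (h + HSIZE - b) % HSIZE;
	unsigned ds = (s + HSIZE - b) % HSIZE;

	return dh > 0 && dh <= ds;
}

static void toy_remove(unsigned key)
{
	unsigned b, s;

	for (b = hash_of(key); htable[b] != EMPTY && htable[b] != key;
	     b = (b + 1) % HSIZE)
		;
	if (htable[b] == EMPTY)
		return;			/* not present */

	/* Scan the remainder of the cluster: any entry whose natural slot
	 * is not strictly between the hole and its current slot can be
	 * pulled back into the hole, keeping every probe sequence unbroken */
	for (s = (b + 1) % HSIZE; htable[s] != EMPTY; s = (s + 1) % HSIZE) {
		unsigned h = hash_of(htable[s]);

		if (!cyclic_between(h, b, s)) {
			htable[b] = htable[s];	/* entry moves into the hole */
			b = s;			/* ...and the hole moves here */
		}
	}
	htable[b] = EMPTY;
}

int main(void)
{
	unsigned b;

	toy_insert(8);			/* 8, 16 and 24 all hash to bucket 0 */
	toy_insert(16);
	toy_insert(24);
	toy_remove(8);			/* cluster is re-compacted */

	for (b = 0; b < HSIZE; b++)
		printf("bucket %u: %u\n", b, htable[b]);
	return 0;
}

The invariant both this sketch and tcp_hash_remove() maintain is that a
lookup's probe sequence never crosses an empty slot before reaching its
entry, which is what makes deletion without tombstones safe.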