Re: [v4,net-next,5/5] octeontx2-af: npc: Support for custom KPU profile from filesystem
From: Jakub Kicinski
Date: Mon Mar 16 2026 - 23:33:14 EST
This is an AI-generated review of your patch. The human sending this
email says "review on patch 3 looks legit, so sending this along
even tho not so sure about this one".
For local reproduction steps see https://netdev-ai.bots.linux.dev/ai-local.html
---
octeontx2-af: npc: Support for custom KPU profile from filesystem
This commit provides a mechanism to load custom KPU (Key Parse Unit)
profiles from the filesystem at module load time, avoiding the need to
flash updated firmware on deployed devices.
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c
> index 8dad272fd069..c8c09a91b13f 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c
[ ... ]
> @@ -666,7 +672,8 @@ void npc_cn20k_parser_profile_init(struct rvu *rvu, int blkaddr)
> num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
>
> for (idx = 0; idx < num_pkinds; idx++)
> - npc_config_kpmaction(rvu, blkaddr, &rvu->kpu.ikpu[idx],
> + npc_config_kpmaction(rvu, blkaddr,
> + npc_get_ikpu_nth_entry(rvu, idx),
> 0, idx, true);
When a filesystem-loaded profile is used on cn20k (from_fs=true),
npc_get_ikpu_nth_entry() returns &profile->ikpu2[idx]. However, ikpu2
is never allocated for the cn20k path.
The allocation of ikpu2 happens inside npc_apply_custom_kpu() in the
from_fs block, but that block is never reached for cn20k because
npc_apply_custom_kpu() returns early via npc_cn20k_apply_custom_kpu()
before reaching the ikpu2 allocation:
static int npc_apply_custom_kpu(struct rvu *rvu,
struct npc_kpu_profile_adapter *profile,
bool from_fs, int *fw_kpus)
{
...
if (is_cn20k(rvu->pdev))
return npc_cn20k_apply_custom_kpu(rvu, profile);
if (from_fs) {
profile->ikpu2 = devm_kcalloc(rvu->dev, 1,
sizeof(ikpu_action_entries),
GFP_KERNEL);
...
}
...
}
However, npc_load_kpu_profile() unconditionally sets profile->from_fs =
true after npc_apply_custom_kpu() returns success.
Doesn't this dereference an offset from a NULL pointer? With ikpu2
never allocated, &profile->ikpu2[idx] evaluates to &NULL[idx]; that
pointer is passed to npc_config_kpmaction(), which dereferences it
(kpuaction->errlev), causing a kernel crash.
This same issue affects cam2 and action2 via npc_get_kpu_cam_nth_entry()
and npc_get_kpu_action_nth_entry().
>
> /* Program KPM CAM and Action profiles */
[ ... ]
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
> index 352dc5f1d5b9..e348cdd375eb 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
[ ... ]
> @@ -1731,32 +1837,89 @@ static int npc_apply_custom_kpu(struct rvu *rvu,
> return -EINVAL;
> }
>
> + *fw_kpus = fw->kpus;
> +
> + sfw = devm_kcalloc(rvu->dev, 1, sizeof(*sfw), GFP_KERNEL);
> + if (!sfw)
> + return -ENOMEM;
> +
> + memcpy(sfw, fw, sizeof(*sfw));
> +
> profile->custom = 1;
> - profile->name = fw->name;
> + profile->name = sfw->name;
> profile->version = le64_to_cpu(fw->version);
> - profile->mcam_kex_prfl.mkex = &fw->mkex;
> - profile->lt_def = &fw->lt_def;
> + profile->mcam_kex_prfl.mkex = &sfw->mkex;
> + profile->lt_def = &sfw->lt_def;
> +
> + /* Binary blob contains ikpu actions entries at start of data[0] */
> + if (from_fs) {
> + profile->ikpu2 = devm_kcalloc(rvu->dev, 1,
> + sizeof(ikpu_action_entries),
> + GFP_KERNEL);
> + if (!profile->ikpu2)
> + return -ENOMEM;
> +
> + action = (struct npc_kpu_profile_action *)(fw->data + offset);
> +
> + if (rvu->kpu_fwdata_sz < hdr_sz + sizeof(ikpu_action_entries))
> + return -ENOMEM;
> +
> + memcpy((void *)profile->ikpu2, action, sizeof(ikpu_action_entries));
> + offset += sizeof(ikpu_action_entries);
> + }
> +
> for (kpu = 0; kpu < fw->kpus; kpu++) {
> fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
> - if (fw_kpu->entries > KPU_MAX_CST_ENT)
> - dev_warn(rvu->dev,
> - "Too many custom entries on KPU%d: %d > %d\n",
> - kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
> - entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
> - cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
> - offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
> + if (!from_fs) {
> + if (fw_kpu->entries > KPU_MAX_CST_ENT)
> + dev_warn(rvu->dev,
> + "Too many custom entries on KPU%d: %d > %d\n",
> + kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
> + entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
> + cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
> + offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
> + action = (struct npc_kpu_profile_action *)(fw->data + offset);
> + offset += fw_kpu->entries * sizeof(*action);
> + if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
> + dev_warn(rvu->dev,
> + "Profile size mismatch on KPU%i parsing.\n",
> + kpu + 1);
> + return -EINVAL;
> + }
> + for (entry = 0; entry < entries; entry++) {
> + profile->kpu[kpu].cam[entry] = cam[entry];
> + profile->kpu[kpu].action[entry] = action[entry];
> + }
> + continue;
> + }
> + entries = fw_kpu->entries;
> + dev_info(rvu->dev,
> + "Loading %u entries on KPU%d\n", entries, kpu);
> +
> + cam2 = (struct npc_kpu_profile_cam2 *)fw_kpu->data;
> + offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam2);
> action = (struct npc_kpu_profile_action *)(fw->data + offset);
> offset += fw_kpu->entries * sizeof(*action);
> if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
> dev_warn(rvu->dev,
> - "Profile size mismatch on KPU%i parsing.\n",
> + "profile size mismatch on kpu%i parsing.\n",
> kpu + 1);
> return -EINVAL;
> }
> +
> + profile->kpu[kpu].cam_entries2 = entries;
> + profile->kpu[kpu].action_entries2 = entries;
> + ret = npc_alloc_kpu_cam2_n_action2(rvu, kpu, entries);
> + if (ret) {
> + dev_warn(rvu->dev,
> + "profile entry allocation failed for kpu=%d for %d entries\n",
> + kpu, entries);
> + return -EINVAL;
> + }
> +
> for (entry = 0; entry < entries; entry++) {
> - profile->kpu[kpu].cam[entry] = cam[entry];
> - profile->kpu[kpu].action[entry] = action[entry];
> + profile->kpu[kpu].cam2[entry] = cam2[entry];
> + profile->kpu[kpu].action2[entry] = action[entry];
> }
> }
This is a concern Simon Horman raised during the v3 review; it was
acknowledged but not addressed in v4.
The loop variable 'entry' is declared as u16 in npc_apply_custom_kpu():
u16 kpu, entry;
while 'entries' is an int:
entries = fw_kpu->entries;
In the from_fs path, 'entries' is read directly from the firmware blob
without bounds checking. If a corrupt or malicious firmware file
specifies fw_kpu->entries > 65535, the u16 loop variable 'entry' wraps
around at 65536:
for (entry = 0; entry < entries; entry++)
When entry reaches 65535, incrementing it produces 0, which is still
less than entries, creating an infinite loop that hangs the kernel.
Should the code cap entries in the from_fs path or use an int-sized loop
variable? The author acknowledged this concern with 'ACK' in v3 review
discussion, and Simon emphasized that "the kernel should not rely on
filesystem data being well-formed."
>
> return 0;
[ ... ]
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npc.c
> @@ -521,13 +521,17 @@ npc_program_single_kpm_profile(struct rvu *rvu, int blkaddr,
[ ... ]
> @@ -536,16 +540,18 @@ npc_program_single_kpm_profile(struct rvu *rvu, int blkaddr,
> max_entries = rvu->hw->npc_kpu_entries / 2;
> entry = start_entry;
> /* Program CAM match entries for previous kpm extracted data */
> - num_entries = min_t(int, profile->cam_entries, max_entries);
> + num_entries = min_t(int, num_cam_entries, max_entries);
> for (idx = 0; entry < num_entries + start_entry; entry++, idx++)
> - npc_config_kpmcam(rvu, blkaddr, &profile->cam[idx],
> + npc_config_kpmcam(rvu, blkaddr,
> + npc_get_kpu_cam_nth_entry(rvu, profile, idx),
> kpm, entry);
>
> entry = start_entry;
> /* Program this kpm's actions */
> - num_entries = min_t(int, profile->action_entries, max_entries);
> + num_entries = min_t(int, num_action_entries, max_entries);
> for (idx = 0; entry < num_entries + start_entry; entry++, idx++)
> - npc_config_kpmaction(rvu, blkaddr, &profile->action[idx],
> + npc_config_kpmaction(rvu, blkaddr,
> + npc_get_kpu_action_nth_entry(rvu, profile, idx),
> kpm, entry, false);
> }
>
> @@ -615,6 +621,12 @@ npc_program_kpm_profile(struct rvu *rvu, int blkaddr,
> const struct npc_kpu_profile *profile1,
> const struct npc_kpu_profile *profile2)
> {
[ ... ]
> + /* Program the first KPM profile */
> + npc_program_single_kpm_profile(rvu, blkaddr, idx,
> + 0, profile1);
> +
> + /* Program the second KPM profile */
> + start_entry = profile->cam_entries;
> + npc_program_single_kpm_profile(rvu, blkaddr, idx,
> + start_entry, profile2);
> +
> + total_cam_entries = profile->cam_entries + profile2->cam_entries;
> + total_action_entries = profile->action_entries + profile2->action_entries;
> + npc_enable_kpm_entry(rvu, blkaddr, idx, 0,
> + total_cam_entries, total_action_entries);
> +
> + val = profile->cam_entries;
> + rvu_write64(rvu, blkaddr, NPC_AF_KPMX_PASS2_OFFSET(idx), val);
> }
npc_program_kpm_profile() uses profile->cam_entries directly in four
places instead of calling npc_get_num_kpu_cam_entries().
When profile->from_fs is true (filesystem-loaded KPU profile), the
accessor returns cam_entries2, while the raw field cam_entries retains
its default value from npc_prepare_default_kpu().
Does this cause the start_entry for the second profile to be based on
the wrong count, leading profile2 entries to overlap or be offset
incorrectly? Would total_cam_entries and total_action_entries passed to
npc_enable_kpm_entry() be wrong, enabling the wrong set of entries? And
would NPC_AF_KPMX_PASS2_OFFSET be set incorrectly?
The callee npc_program_single_kpm_profile() was correctly updated to use
accessors, but npc_program_kpm_profile() was not.