

Merge tag 'net-6.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from CAN, wireless, Bluetooth, and Netfilter.

  Current release - regressions:

   - Revert "kunit: configs: Enable CONFIG_INIT_STACK_ALL_PATTERN in
     all_tests", makes kunit error out if compiler is old

   - wifi: iwlwifi: mvm: fix assert on suspend

   - rxrpc: fix return from none_validate_challenge()

  Current release - new code bugs:

   - ovpn: couple of fixes for socket cleanup and UDP-tunnel teardown

   - can: kvaser_pciefd: refine error prone echo_skb_max handling logic

   - fix net_devmem_bind_dmabuf() stub when DEVMEM not compiled

   - eth: airoha: fixes for config / accel in bridge mode

  Previous releases - regressions:

   - Bluetooth: hci_qca: move the SoC type check to the right place, fix
     GPIO integration

   - prevent a NULL deref in rtnl_create_link() after locking changes

   - fix udp gso skb_segment after pull from frag_list

   - hv_netvsc: fix potential deadlock in netvsc_vf_setxdp()

  Previous releases - always broken:

   - netfilter:
       - nf_nat: also check reverse tuple to obtain clashing entry
       - nf_set_pipapo_avx2: fix initial map fill (zeroing)

   - fix the helper for incremental update of packet checksums after
     modifying the IP address, used by ILA and BPF

   - eth:
       - stmmac: prevent div by 0 when clock rate is misconfigured
       - ice: fix Tx scheduler handling of XDP and changing queue count
       - b53: fix support for the RGMII interface when delays configured"
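
The checksum bullet above refers to the incremental-update arithmetic of RFC 1624: when a 16-bit word m of a checksummed packet changes to m', the checksum can be patched without re-summing the whole packet, HC' = ~(~HC + ~m + m'). A minimal standalone sketch of that folding arithmetic (illustrative names, not the kernel helper itself):

#include <stdint.h>
#include <stdio.h>

/* RFC 1624 incremental update: HC' = ~(~HC + ~m + m') */
static uint16_t csum_update(uint16_t hc, uint16_t m_old, uint16_t m_new)
{
        uint32_t sum = (uint16_t)~hc + (uint16_t)~m_old + m_new;

        /* fold the carries back into the low 16 bits */
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);

        return (uint16_t)~sum;
}

int main(void)
{
        /* toy values: one word of an IP address changes */
        printf("0x%04x\n", csum_update(0xdead, 0x1234, 0xabcd));
        return 0;
}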

* tag 'net-6.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (76 commits)
  calipso: unlock rcu before returning -EAFNOSUPPORT
  seg6: Fix validation of nexthop addresses
  net: prevent a NULL deref in rtnl_create_link()
  net: annotate data-races around cleanup_net_task
  selftests: drv-net: tso: make bkg() wait for socat to quit
  selftests: drv-net: tso: fix the GRE device name
  selftests: drv-net: add configs for the TSO test
  wireguard: device: enable threaded NAPI
  netlink: specs: rt-link: decode ip6gre
  netlink: specs: rt-link: add missing byte-order properties
  net: wwan: mhi_wwan_mbim: use correct mux_id for multiplexing
  wifi: cfg80211/mac80211: correctly parse S1G beacon optional elements
  net: dsa: b53: do not touch DLL_IQQD on bcm53115
  net: dsa: b53: allow RGMII for bcm63xx RGMII ports
  net: dsa: b53: do not configure bcm63xx's IMP port interface
  net: dsa: b53: do not enable RGMII delay on bcm63xx
  net: dsa: b53: do not enable EEE on bcm63xx
  net: ti: icssg-prueth: Fix swapped TX stats for MII interfaces.
  selftests: netfilter: nft_nat.sh: add test for reverse clash with nat
  netfilter: nf_nat: also check reverse tuple to obtain clashing entry
  ...
commit 2c7e4a2663
Linus Torvalds, 2025-06-05 12:34:55 -07:00
89 changed files with 1060 additions and 618 deletions

View file

@@ -1685,15 +1685,19 @@ attribute-sets:
       -
         name: iflags
         type: u16
+        byte-order: big-endian
       -
         name: oflags
         type: u16
+        byte-order: big-endian
       -
         name: ikey
         type: u32
+        byte-order: big-endian
       -
         name: okey
         type: u32
+        byte-order: big-endian
       -
         name: local
         type: binary
@@ -1713,10 +1717,11 @@ attribute-sets:
         type: u8
       -
         name: encap-limit
-        type: u32
+        type: u8
       -
         name: flowinfo
         type: u32
+        byte-order: big-endian
       -
         name: flags
         type: u32
@@ -1729,9 +1734,11 @@ attribute-sets:
       -
         name: encap-sport
         type: u16
+        byte-order: big-endian
       -
         name: encap-dport
         type: u16
+        byte-order: big-endian
       -
         name: collect-metadata
         type: flag
@@ -1753,6 +1760,54 @@ attribute-sets:
       -
         name: erspan-hwid
         type: u16
+  -
+    name: linkinfo-gre6-attrs
+    subset-of: linkinfo-gre-attrs
+    attributes:
+      -
+        name: link
+      -
+        name: iflags
+      -
+        name: oflags
+      -
+        name: ikey
+      -
+        name: okey
+      -
+        name: local
+        display-hint: ipv6
+      -
+        name: remote
+        display-hint: ipv6
+      -
+        name: ttl
+      -
+        name: encap-limit
+      -
+        name: flowinfo
+      -
+        name: flags
+      -
+        name: encap-type
+      -
+        name: encap-flags
+      -
+        name: encap-sport
+      -
+        name: encap-dport
+      -
+        name: collect-metadata
+      -
+        name: fwmark
+      -
+        name: erspan-index
+      -
+        name: erspan-ver
+      -
+        name: erspan-dir
+      -
+        name: erspan-hwid
   -
     name: linkinfo-vti-attrs
     name-prefix: ifla-vti-
@@ -1764,9 +1819,11 @@ attribute-sets:
       -
         name: ikey
         type: u32
+        byte-order: big-endian
       -
         name: okey
         type: u32
+        byte-order: big-endian
       -
         name: local
         type: binary
@@ -1816,6 +1873,7 @@ attribute-sets:
       -
         name: port
         type: u16
+        byte-order: big-endian
       -
         name: collect-metadata
         type: flag
@@ -1835,6 +1893,7 @@ attribute-sets:
       -
         name: label
         type: u32
+        byte-order: big-endian
       -
         name: ttl-inherit
         type: u8
@@ -1875,9 +1934,11 @@ attribute-sets:
       -
         name: flowinfo
         type: u32
+        byte-order: big-endian
       -
         name: flags
         type: u16
+        byte-order: big-endian
       -
         name: proto
         type: u8
@@ -1907,9 +1968,11 @@ attribute-sets:
       -
         name: encap-sport
         type: u16
+        byte-order: big-endian
       -
         name: encap-dport
         type: u16
+        byte-order: big-endian
       -
         name: collect-metadata
         type: flag
@@ -2224,6 +2287,9 @@ sub-messages:
       -
         value: gretap
         attribute-set: linkinfo-gre-attrs
+      -
+        value: ip6gre
+        attribute-set: linkinfo-gre6-attrs
       -
         value: geneve
         attribute-set: linkinfo-geneve-attrs
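
For context on the byte-order additions above: marking an attribute big-endian tells spec-driven tooling that the payload is carried in network byte order, so a decoder on a little-endian host must byte-swap it. A hedged userspace-style sketch (get_be16_attr is an illustrative name, not a library function):

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* read a u16 netlink attribute payload that is big-endian on the wire */
static uint16_t get_be16_attr(const void *payload)
{
        uint16_t raw;

        memcpy(&raw, payload, sizeof(raw));     /* payload may be unaligned */
        return ntohs(raw);                      /* network -> host byte order */
}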

View file

@@ -533,6 +533,8 @@ static int ps_setup(struct hci_dev *hdev)
 					       ps_host_wakeup_irq_handler,
 					       IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
 					       dev_name(&serdev->dev), nxpdev);
+		if (ret)
+			bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring\n");
 		disable_irq(psdata->irq_handler);
 		device_init_wakeup(&serdev->dev, true);
 	}

View file

@@ -2415,14 +2415,14 @@ static int qca_serdev_probe(struct serdev_device *serdev)
 	qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
 						GPIOD_OUT_LOW);
-	if (IS_ERR(qcadev->bt_en) &&
-	    (data->soc_type == QCA_WCN6750 ||
-	     data->soc_type == QCA_WCN6855)) {
-		dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
-		return PTR_ERR(qcadev->bt_en);
-	}
+	if (IS_ERR(qcadev->bt_en))
+		return dev_err_probe(&serdev->dev,
+				     PTR_ERR(qcadev->bt_en),
+				     "failed to acquire BT_EN gpio\n");
 
-	if (!qcadev->bt_en)
+	if (!qcadev->bt_en &&
+	    (data->soc_type == QCA_WCN6750 ||
+	     data->soc_type == QCA_WCN6855))
 		power_ctrl_enabled = false;
 
 	qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",

View file

@@ -966,7 +966,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
 		u32 status, tx_nr_packets_max;
 
 		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
-				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
+				      roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT));
 		if (!netdev)
 			return -ENOMEM;
@@ -995,7 +995,6 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
 		can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
 
 		can->can.clock.freq = pcie->freq;
-		can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count);
 		spin_lock_init(&can->lock);
 
 		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
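
Why size the echo buffer with roundup_pow_of_two()? With a power-of-two ring, a free-running hardware sequence counter maps to a slot with a simple mask and the mapping stays consistent across counter wrap-around; with seq % size and a non-power-of-two size, the mapping shifts phase at every wrap. A standalone sketch of that pattern (illustrative names, not driver code):

#include <stdint.h>

#define RING_SIZE 32u                   /* must be a power of two */
#define RING_MASK (RING_SIZE - 1u)

static inline unsigned int seq_to_slot(uint16_t seq)
{
        /* same slot for a given seq before and after the counter wraps */
        return seq & RING_MASK;
}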

View file

@@ -22,6 +22,7 @@
 #include <linux/gpio.h>
 #include <linux/kernel.h>
 #include <linux/math.h>
+#include <linux/minmax.h>
 #include <linux/module.h>
 #include <linux/platform_data/b53.h>
 #include <linux/phy.h>
@@ -1322,41 +1323,17 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
 				  phy_interface_t interface)
 {
 	struct b53_device *dev = ds->priv;
-	u8 rgmii_ctrl = 0, off;
+	u8 rgmii_ctrl = 0;
 
-	if (port == dev->imp_port)
-		off = B53_RGMII_CTRL_IMP;
-	else
-		off = B53_RGMII_CTRL_P(port);
-
-	b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
-
-	switch (interface) {
-	case PHY_INTERFACE_MODE_RGMII_ID:
-		rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
-		break;
-	case PHY_INTERFACE_MODE_RGMII_RXID:
-		rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC);
-		rgmii_ctrl |= RGMII_CTRL_DLL_RXC;
-		break;
-	case PHY_INTERFACE_MODE_RGMII_TXID:
-		rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC);
-		rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
-		break;
-	case PHY_INTERFACE_MODE_RGMII:
-	default:
-		rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
-		break;
-	}
+	b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl);
+	rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
 
-	if (port != dev->imp_port) {
-		if (is63268(dev))
-			rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
+	if (is63268(dev))
+		rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
 
-		rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
-	}
+	rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
 
-	b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+	b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl);
 
 	dev_dbg(ds->dev, "Configured port %d for %s\n", port,
 		phy_modes(interface));
@@ -1377,8 +1354,7 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
 	 * tx_clk aligned timing (restoring to reset defaults)
 	 */
 	b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
-	rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
-			RGMII_CTRL_TIMING_SEL);
+	rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
 
 	/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
 	 * sure that we enable the port TX clock internal delay to
@@ -1398,7 +1374,10 @@
 		rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
 	if (interface == PHY_INTERFACE_MODE_RGMII)
 		rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
-	rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+
+	if (dev->chip_id != BCM53115_DEVICE_ID)
+		rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+
 	b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
 
 	dev_info(ds->dev, "Configured port %d for %s\n", port,
@@ -1462,6 +1441,10 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
 	__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
 	__set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
 
+	/* BCM63xx RGMII ports support RGMII */
+	if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+		phy_interface_set_rgmii(config->supported_interfaces);
+
 	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
 		MAC_10 | MAC_100;
@@ -1501,7 +1484,7 @@ static void b53_phylink_mac_config(struct phylink_config *config,
 	struct b53_device *dev = ds->priv;
 	int port = dp->index;
 
-	if (is63xx(dev) && port >= B53_63XX_RGMII0)
+	if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
 		b53_adjust_63xx_rgmii(ds, port, interface);
 
 	if (mode == MLO_AN_FIXED) {
@@ -2353,6 +2336,9 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
 {
 	int ret;
 
+	if (!b53_support_eee(ds, port))
+		return 0;
+
 	ret = phy_init_eee(phy, false);
 	if (ret)
 		return 0;
@@ -2367,7 +2353,7 @@ bool b53_support_eee(struct dsa_switch *ds, int port)
 {
 	struct b53_device *dev = ds->priv;
 
-	return !is5325(dev) && !is5365(dev);
+	return !is5325(dev) && !is5365(dev) && !is63xx(dev);
 }
 EXPORT_SYMBOL(b53_support_eee);
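
For reference, the RGMII variants handled by the switch statement removed from b53_adjust_63xx_rgmii() above differ only in which side inserts the ~2 ns clock skew: rgmii-id asks the MAC/switch to delay both clocks, rgmii-rxid and rgmii-txid one direction each, and plain rgmii neither (the PHY or PCB trace length provides it instead). A minimal sketch of that conventional MAC-side mapping, mirroring the deleted switch:

enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

static unsigned char rgmii_delay_bits(enum rgmii_mode mode,
                                      unsigned char rxc_bit,
                                      unsigned char txc_bit)
{
        switch (mode) {
        case RGMII_ID:
                return rxc_bit | txc_bit;       /* delay RX and TX clocks */
        case RGMII_RXID:
                return rxc_bit;                 /* delay RX clock only */
        case RGMII_TXID:
                return txc_bit;                 /* delay TX clock only */
        case RGMII:
        default:
                return 0;                       /* no internal delays */
        }
}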

View file

@@ -84,6 +84,8 @@ static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
 	val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
 	airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
 	airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
+
+	airoha_ppe_init_upd_mem(port);
 }
 
 static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,

View file

@@ -614,6 +614,7 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
 int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
 int airoha_ppe_init(struct airoha_eth *eth);
 void airoha_ppe_deinit(struct airoha_eth *eth);
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
 struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
 						  u32 hash);
 void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,

View file

@@ -223,6 +223,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
 	int dsa_port = airoha_get_dsa_port(&dev);
 	struct airoha_foe_mac_info_common *l2;
 	u32 qdata, ports_pad, val;
+	u8 smac_id = 0xf;
 
 	memset(hwe, 0, sizeof(*hwe));
@@ -257,6 +258,8 @@
 		 */
 		if (airhoa_is_lan_gdm_port(port))
 			val |= AIROHA_FOE_IB2_FAST_PATH;
+
+		smac_id = port->id;
 	}
 
 	if (is_multicast_ether_addr(data->eth.h_dest))
@@ -291,7 +294,7 @@
 		hwe->ipv4.l2.src_mac_lo =
 			get_unaligned_be16(data->eth.h_source + 4);
 	} else {
-		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
+		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
 	}
 
 	if (data->vlan.num) {
@@ -636,7 +639,6 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
 	u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
 	struct airoha_foe_entry *hwe_p, hwe;
 	struct airoha_flow_table_entry *f;
-	struct airoha_foe_mac_info *l2;
 	int type;
 
 	hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
@@ -653,18 +655,25 @@
 	memcpy(&hwe, hwe_p, sizeof(*hwe_p));
 	hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
-	l2 = &hwe.bridge.l2;
-	memcpy(l2, &e->data.bridge.l2, sizeof(*l2));
 
 	type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
-	if (type == PPE_PKT_TYPE_IPV4_HNAPT)
-		memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
-		       sizeof(hwe.ipv4.new_tuple));
-	else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T &&
-		 l2->common.etype == ETH_P_IP)
-		l2->common.etype = ETH_P_IPV6;
+	if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+		memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
+		hwe.ipv6.ib2 = e->data.bridge.ib2;
+		/* setting smac_id to 0xf instruct the hw to keep original
+		 * source mac address
+		 */
+		hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
+						    0xf);
+	} else {
+		memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
+		       sizeof(hwe.bridge.l2));
+		hwe.bridge.ib2 = e->data.bridge.ib2;
+		if (type == PPE_PKT_TYPE_IPV4_HNAPT)
+			memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
+			       sizeof(hwe.ipv4.new_tuple));
+	}
 
-	hwe.bridge.ib2 = e->data.bridge.ib2;
 	hwe.bridge.data = e->data.bridge.data;
 
 	airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
@@ -1238,6 +1247,27 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
 	airoha_ppe_foe_insert_entry(ppe, skb, hash);
 }
 
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
+{
+	struct airoha_eth *eth = port->qdma->eth;
+	struct net_device *dev = port->dev;
+	const u8 *addr = dev->dev_addr;
+	u32 val;
+
+	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+
+	val = (addr[0] << 8) | addr[1];
+	airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+	airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+		     FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+		     FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
+		     PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+}
+
 int airoha_ppe_init(struct airoha_eth *eth)
 {
 	struct airoha_ppe *ppe;

View file

@@ -313,6 +313,16 @@
 #define REG_PPE_RAM_BASE(_n)		(((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
 #define REG_PPE_RAM_ENTRY(_m, _n)	(REG_PPE_RAM_BASE(_m) + ((_n) << 2))
 
+#define REG_UPDMEM_CTRL(_n)		(((_n) ? PPE2_BASE : PPE1_BASE) + 0x370)
+#define PPE_UPDMEM_ACK_MASK		BIT(31)
+#define PPE_UPDMEM_ADDR_MASK		GENMASK(11, 8)
+#define PPE_UPDMEM_OFFSET_MASK		GENMASK(7, 4)
+#define PPE_UPDMEM_SEL_MASK		GENMASK(3, 2)
+#define PPE_UPDMEM_WR_MASK		BIT(1)
+#define PPE_UPDMEM_REQ_MASK		BIT(0)
+
+#define REG_UPDMEM_DATA(_n)		(((_n) ? PPE2_BASE : PPE1_BASE) + 0x374)
+
 #define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)	(GDM_BASE(_n) + 0x280)
 #define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)	(GDM_BASE(_n) + 0x284)
 #define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n)	(GDM_BASE(_n) + 0x288)
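
The UPDMEM data register added above is 32 bits wide, so airoha_ppe_init_upd_mem() writes the 48-bit station MAC in two pieces: bytes 2-5 at offset 0 and bytes 0-1 at offset 1. The split, as a standalone sketch of the arithmetic used in that function:

#include <stdint.h>

static void split_mac(const uint8_t addr[6], uint32_t *lo, uint32_t *hi)
{
        /* offset 0 word: addr[2]..addr[5] */
        *lo = ((uint32_t)addr[2] << 24) | ((uint32_t)addr[3] << 16) |
              ((uint32_t)addr[4] << 8) | addr[5];
        /* offset 1 word: addr[0]..addr[1] */
        *hi = ((uint32_t)addr[0] << 8) | addr[1];
}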

View file

@@ -2153,7 +2153,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
 			};
 			stats[stats_idx++] = (struct stats) {
 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
-				.value = cpu_to_be64(priv->rx[0].fill_cnt),
+				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
 				.queue_id = cpu_to_be32(idx),
 			};
 		}

View file

@@ -764,6 +764,9 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
 	s16 completion_tag;
 
 	pkt = gve_alloc_pending_packet(tx);
+	if (!pkt)
+		return -ENOMEM;
+
 	pkt->skb = skb;
 	completion_tag = pkt - tx->dqo.pending_packets;

View file

@@ -324,8 +324,6 @@ static __init int hinic3_nic_lld_init(void)
 {
 	int err;
 
-	pr_info("%s: %s\n", HINIC3_NIC_DRV_NAME, HINIC3_NIC_DRV_DESC);
-
 	err = hinic3_lld_init();
 	if (err)
 		return err;

View file

@@ -268,7 +268,6 @@ struct iavf_adapter {
 	struct list_head vlan_filter_list;
 	int num_vlan_filters;
 	struct list_head mac_filter_list;
-	struct mutex crit_lock;
 	/* Lock to protect accesses to MAC and VLAN lists */
 	spinlock_t mac_vlan_list_lock;
 	char misc_vector_name[IFNAMSIZ + 9];
char misc_vector_name[IFNAMSIZ + 9]; char misc_vector_name[IFNAMSIZ + 9];

View file

@@ -4,6 +4,8 @@
 #include <linux/bitfield.h>
 #include <linux/uaccess.h>
 
+#include <net/netdev_lock.h>
+
 /* ethtool support for iavf */
 #include "iavf.h"
@@ -1256,9 +1258,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
 {
 	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
 	struct iavf_fdir_fltr *fltr;
-	int count = 50;
 	int err;
 
+	netdev_assert_locked(adapter->netdev);
+
 	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
 		return -EOPNOTSUPP;
@@ -1277,14 +1280,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
 	if (!fltr)
 		return -ENOMEM;
 
-	while (!mutex_trylock(&adapter->crit_lock)) {
-		if (--count == 0) {
-			kfree(fltr);
-			return -EINVAL;
-		}
-		udelay(1);
-	}
-
 	err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
 	if (!err)
 		err = iavf_fdir_add_fltr(adapter, fltr);
@@ -1292,7 +1287,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
 	if (err)
 		kfree(fltr);
 
-	mutex_unlock(&adapter->crit_lock);
 	return err;
 }
@@ -1435,11 +1429,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
 {
 	struct iavf_adv_rss *rss_old, *rss_new;
 	bool rss_new_add = false;
-	int count = 50, err = 0;
 	bool symm = false;
 	u64 hash_flds;
+	int err = 0;
 	u32 hdrs;
 
+	netdev_assert_locked(adapter->netdev);
+
 	if (!ADV_RSS_SUPPORT(adapter))
 		return -EOPNOTSUPP;
@@ -1463,15 +1459,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
 		return -EINVAL;
 	}
 
-	while (!mutex_trylock(&adapter->crit_lock)) {
-		if (--count == 0) {
-			kfree(rss_new);
-			return -EINVAL;
-		}
-		udelay(1);
-	}
-
 	spin_lock_bh(&adapter->adv_rss_lock);
 	rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
 	if (rss_old) {
@@ -1500,8 +1487,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
 	if (!err)
 		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
 
-	mutex_unlock(&adapter->crit_lock);
-
 	if (!rss_new_add)
 		kfree(rss_new);

View file

@@ -1287,11 +1287,11 @@ static void iavf_configure(struct iavf_adapter *adapter)
 /**
  * iavf_up_complete - Finish the last steps of bringing up a connection
  * @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
 static void iavf_up_complete(struct iavf_adapter *adapter)
 {
+	netdev_assert_locked(adapter->netdev);
+
 	iavf_change_state(adapter, __IAVF_RUNNING);
 	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -1410,13 +1410,13 @@ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
 /**
  * iavf_down - Shutdown the connection processing
  * @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
 void iavf_down(struct iavf_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 
+	netdev_assert_locked(netdev);
+
 	if (adapter->state <= __IAVF_DOWN_PENDING)
 		return;
@@ -2025,22 +2025,21 @@ err:
  * iavf_finish_config - do all netdev work that needs RTNL
  * @work: our work_struct
  *
- * Do work that needs both RTNL and crit_lock.
- **/
+ * Do work that needs RTNL.
+ */
 static void iavf_finish_config(struct work_struct *work)
 {
 	struct iavf_adapter *adapter;
-	bool locks_released = false;
+	bool netdev_released = false;
 	int pairs, err;
 
 	adapter = container_of(work, struct iavf_adapter, finish_config);
 
 	/* Always take RTNL first to prevent circular lock dependency;
-	 * The dev->lock is needed to update the queue number
+	 * the dev->lock (== netdev lock) is needed to update the queue number.
 	 */
 	rtnl_lock();
 	netdev_lock(adapter->netdev);
-	mutex_lock(&adapter->crit_lock);
 
 	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
 	    adapter->netdev->reg_state == NETREG_REGISTERED &&
@@ -2059,22 +2058,21 @@ static void iavf_finish_config(struct work_struct *work)
 	netif_set_real_num_tx_queues(adapter->netdev, pairs);
 
 	if (adapter->netdev->reg_state != NETREG_REGISTERED) {
-		mutex_unlock(&adapter->crit_lock);
 		netdev_unlock(adapter->netdev);
-		locks_released = true;
+		netdev_released = true;
 		err = register_netdevice(adapter->netdev);
 		if (err) {
 			dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
 				err);
 
 			/* go back and try again.*/
-			mutex_lock(&adapter->crit_lock);
+			netdev_lock(adapter->netdev);
 			iavf_free_rss(adapter);
 			iavf_free_misc_irq(adapter);
 			iavf_reset_interrupt_capability(adapter);
 			iavf_change_state(adapter,
 					  __IAVF_INIT_CONFIG_ADAPTER);
-			mutex_unlock(&adapter->crit_lock);
+			netdev_unlock(adapter->netdev);
 			goto out;
 		}
 	}
@@ -2090,10 +2088,8 @@ static void iavf_finish_config(struct work_struct *work)
 	}
 
 out:
-	if (!locks_released) {
-		mutex_unlock(&adapter->crit_lock);
+	if (!netdev_released)
 		netdev_unlock(adapter->netdev);
-	}
 	rtnl_unlock();
 }
@@ -2911,28 +2907,15 @@ err:
 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
 }
 
-/**
- * iavf_watchdog_task - Periodic call-back task
- * @work: pointer to work_struct
- **/
-static void iavf_watchdog_task(struct work_struct *work)
+static const int IAVF_NO_RESCHED = -1;
+
+/* return: msec delay for requeueing itself */
+static int iavf_watchdog_step(struct iavf_adapter *adapter)
 {
-	struct iavf_adapter *adapter = container_of(work,
-						    struct iavf_adapter,
-						    watchdog_task.work);
-	struct net_device *netdev = adapter->netdev;
 	struct iavf_hw *hw = &adapter->hw;
 	u32 reg_val;
 
-	netdev_lock(netdev);
-	if (!mutex_trylock(&adapter->crit_lock)) {
-		if (adapter->state == __IAVF_REMOVE) {
-			netdev_unlock(netdev);
-			return;
-		}
-
-		goto restart_watchdog;
-	}
+	netdev_assert_locked(adapter->netdev);
 
 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
 		iavf_change_state(adapter, __IAVF_COMM_FAILED);
@@ -2940,39 +2923,19 @@ static void iavf_watchdog_task(struct work_struct *work)
 	switch (adapter->state) {
 	case __IAVF_STARTUP:
 		iavf_startup(adapter);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(30));
-		return;
+		return 30;
 	case __IAVF_INIT_VERSION_CHECK:
 		iavf_init_version_check(adapter);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(30));
-		return;
+		return 30;
 	case __IAVF_INIT_GET_RESOURCES:
 		iavf_init_get_resources(adapter);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(1));
-		return;
+		return 1;
 	case __IAVF_INIT_EXTENDED_CAPS:
 		iavf_init_process_extended_caps(adapter);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(1));
-		return;
+		return 1;
 	case __IAVF_INIT_CONFIG_ADAPTER:
 		iavf_init_config_adapter(adapter);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(1));
-		return;
+		return 1;
 	case __IAVF_INIT_FAILED:
 		if (test_bit(__IAVF_IN_REMOVE_TASK,
 			     &adapter->crit_section)) {
@@ -2980,27 +2943,18 @@ static void iavf_watchdog_task(struct work_struct *work)
 			 * watchdog task, iavf_remove should handle this state
 			 * as it can loop forever
 			 */
-			mutex_unlock(&adapter->crit_lock);
-			netdev_unlock(netdev);
-			return;
+			return IAVF_NO_RESCHED;
 		}
 		if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
 			dev_err(&adapter->pdev->dev,
 				"Failed to communicate with PF; waiting before retry\n");
 			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
 			iavf_shutdown_adminq(hw);
-			mutex_unlock(&adapter->crit_lock);
-			netdev_unlock(netdev);
-			queue_delayed_work(adapter->wq,
-					   &adapter->watchdog_task, (5 * HZ));
-			return;
+			return 5000;
 		}
 		/* Try again from failed step*/
 		iavf_change_state(adapter, adapter->last_state);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
-		return;
+		return 1000;
 	case __IAVF_COMM_FAILED:
 		if (test_bit(__IAVF_IN_REMOVE_TASK,
 			     &adapter->crit_section)) {
@@ -3010,9 +2964,7 @@ static void iavf_watchdog_task(struct work_struct *work)
 			 */
 			iavf_change_state(adapter, __IAVF_INIT_FAILED);
 			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
-			mutex_unlock(&adapter->crit_lock);
-			netdev_unlock(netdev);
-			return;
+			return IAVF_NO_RESCHED;
 		}
 		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
 			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -3030,18 +2982,9 @@ static void iavf_watchdog_task(struct work_struct *work)
 		}
 		adapter->aq_required = 0;
 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq,
-				   &adapter->watchdog_task,
-				   msecs_to_jiffies(10));
-		return;
+		return 10;
 	case __IAVF_RESETTING:
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   HZ * 2);
-		return;
+		return 2000;
 	case __IAVF_DOWN:
 	case __IAVF_DOWN_PENDING:
 	case __IAVF_TESTING:
@@ -3068,9 +3011,7 @@ static void iavf_watchdog_task(struct work_struct *work)
 		break;
 	case __IAVF_REMOVE:
 	default:
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		return;
+		return IAVF_NO_RESCHED;
 	}
 
 	/* check for hw reset */
@@ -3080,24 +3021,29 @@ static void iavf_watchdog_task(struct work_struct *work)
 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
 		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq,
-				   &adapter->watchdog_task, HZ * 2);
-		return;
 	}
 
-	mutex_unlock(&adapter->crit_lock);
-restart_watchdog:
-	netdev_unlock(netdev);
+	return adapter->aq_required ? 20 : 2000;
+}
+
+static void iavf_watchdog_task(struct work_struct *work)
+{
+	struct iavf_adapter *adapter = container_of(work,
+						    struct iavf_adapter,
+						    watchdog_task.work);
+	struct net_device *netdev = adapter->netdev;
+	int msec_delay;
+
+	netdev_lock(netdev);
+	msec_delay = iavf_watchdog_step(adapter);
+
+	/* note that we schedule a different task */
 	if (adapter->state >= __IAVF_DOWN)
 		queue_work(adapter->wq, &adapter->adminq_task);
-	if (adapter->aq_required)
+
+	if (msec_delay != IAVF_NO_RESCHED)
 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(20));
-	else
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   HZ * 2);
+				   msecs_to_jiffies(msec_delay));
+
+	netdev_unlock(netdev);
 }
@@ -3105,14 +3051,15 @@ restart_watchdog:
  * @adapter: board private structure
  *
  * Set communication failed flag and free all resources.
- * NOTE: This function is expected to be called with crit_lock being held.
- **/
+ */
 static void iavf_disable_vf(struct iavf_adapter *adapter)
 {
 	struct iavf_mac_filter *f, *ftmp;
 	struct iavf_vlan_filter *fv, *fvtmp;
 	struct iavf_cloud_filter *cf, *cftmp;
 
+	netdev_assert_locked(adapter->netdev);
+
 	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
 
 	/* We don't use netif_running() because it may be true prior to
@@ -3212,17 +3159,7 @@ static void iavf_reset_task(struct work_struct *work)
 	int i = 0, err;
 	bool running;
 
-	/* When device is being removed it doesn't make sense to run the reset
-	 * task, just return in such a case.
-	 */
 	netdev_lock(netdev);
-	if (!mutex_trylock(&adapter->crit_lock)) {
-		if (adapter->state != __IAVF_REMOVE)
-			queue_work(adapter->wq, &adapter->reset_task);
-
-		netdev_unlock(netdev);
-		return;
-	}
 
 	iavf_misc_irq_disable(adapter);
 	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
@@ -3267,7 +3204,6 @@ static void iavf_reset_task(struct work_struct *work)
 		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
 			reg_val);
 		iavf_disable_vf(adapter);
-		mutex_unlock(&adapter->crit_lock);
 		netdev_unlock(netdev);
 		return; /* Do not attempt to reinit. It's dead, Jim. */
 	}
@@ -3411,7 +3347,6 @@ continue_reset:
 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
 
 	wake_up(&adapter->reset_waitqueue);
-	mutex_unlock(&adapter->crit_lock);
 	netdev_unlock(netdev);
 
 	return;
@@ -3422,7 +3357,6 @@ reset_err:
 	}
 	iavf_disable_vf(adapter);
 
-	mutex_unlock(&adapter->crit_lock);
 	netdev_unlock(netdev);
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
 }
@@ -3435,6 +3369,7 @@ static void iavf_adminq_task(struct work_struct *work)
 {
 	struct iavf_adapter *adapter =
 		container_of(work, struct iavf_adapter, adminq_task);
+	struct net_device *netdev = adapter->netdev;
 	struct iavf_hw *hw = &adapter->hw;
 	struct iavf_arq_event_info event;
 	enum virtchnl_ops v_op;
@@ -3442,13 +3377,7 @@ static void iavf_adminq_task(struct work_struct *work)
 	u32 val, oldval;
 	u16 pending;
 
-	if (!mutex_trylock(&adapter->crit_lock)) {
-		if (adapter->state == __IAVF_REMOVE)
-			return;
-
-		queue_work(adapter->wq, &adapter->adminq_task);
-		goto out;
-	}
+	netdev_lock(netdev);
 
 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
 		goto unlock;
@@ -3515,8 +3444,7 @@ static void iavf_adminq_task(struct work_struct *work)
 freedom:
 	kfree(event.msg_buf);
 unlock:
-	mutex_unlock(&adapter->crit_lock);
-out:
+	netdev_unlock(netdev);
 	/* re-enable Admin queue interrupt cause */
 	iavf_misc_irq_enable(adapter);
 }
@@ -4209,8 +4137,8 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
 				    struct flow_cls_offload *cls_flower)
 {
 	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
-	struct iavf_cloud_filter *filter = NULL;
-	int err = -EINVAL, count = 50;
+	struct iavf_cloud_filter *filter;
+	int err;
 
 	if (tc < 0) {
 		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
@@ -4220,17 +4148,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
 	if (!filter)
 		return -ENOMEM;
-
-	while (!mutex_trylock(&adapter->crit_lock)) {
-		if (--count == 0) {
-			kfree(filter);
-			return err;
-		}
-		udelay(1);
-	}
 
 	filter->cookie = cls_flower->cookie;
 
+	netdev_lock(adapter->netdev);
+
 	/* bail out here if filter already exists */
 	spin_lock_bh(&adapter->cloud_filter_list_lock);
 	if (iavf_find_cf(adapter, &cls_flower->cookie)) {
@@ -4264,7 +4185,7 @@ err:
 	if (err)
 		kfree(filter);
 
-	mutex_unlock(&adapter->crit_lock);
+	netdev_unlock(adapter->netdev);
 	return err;
 }
@@ -4568,28 +4489,13 @@ static int iavf_open(struct net_device *netdev)
 		return -EIO;
 	}
 
-	while (!mutex_trylock(&adapter->crit_lock)) {
-		/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
-		 * is already taken and iavf_open is called from an upper
-		 * device's notifier reacting on NETDEV_REGISTER event.
-		 * We have to leave here to avoid dead lock.
-		 */
-		if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
-			return -EBUSY;
-
-		usleep_range(500, 1000);
-	}
-
-	if (adapter->state != __IAVF_DOWN) {
-		err = -EBUSY;
-		goto err_unlock;
-	}
+	if (adapter->state != __IAVF_DOWN)
+		return -EBUSY;
 
 	if (adapter->state == __IAVF_RUNNING &&
 	    !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
 		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
-		err = 0;
-		goto err_unlock;
+		return 0;
 	}
 
 	/* allocate transmit descriptors */
@@ -4608,9 +4514,7 @@ static int iavf_open(struct net_device *netdev)
 		goto err_req_irq;
 
 	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
 	iavf_add_filter(adapter, adapter->hw.mac.addr);
-
 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 
 	/* Restore filters that were removed with IFF_DOWN */
@@ -4623,8 +4527,6 @@ static int iavf_open(struct net_device *netdev)
 	iavf_irq_enable(adapter, true);
 
-	mutex_unlock(&adapter->crit_lock);
-
 	return 0;
 
 err_req_irq:
@@ -4634,8 +4536,6 @@ err_setup_rx:
 	iavf_free_all_rx_resources(adapter);
 err_setup_tx:
 	iavf_free_all_tx_resources(adapter);
-err_unlock:
-	mutex_unlock(&adapter->crit_lock);
 
 	return err;
 }
@@ -4659,12 +4559,8 @@ static int iavf_close(struct net_device *netdev)
 	netdev_assert_locked(netdev);
 
-	mutex_lock(&adapter->crit_lock);
-
-	if (adapter->state <= __IAVF_DOWN_PENDING) {
-		mutex_unlock(&adapter->crit_lock);
+	if (adapter->state <= __IAVF_DOWN_PENDING)
 		return 0;
-	}
 
 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
 	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
@@ -4695,7 +4591,6 @@ static int iavf_close(struct net_device *netdev)
 	iavf_change_state(adapter, __IAVF_DOWN_PENDING);
 	iavf_free_traffic_irqs(adapter);
 
-	mutex_unlock(&adapter->crit_lock);
 	netdev_unlock(netdev);
 
 	/* We explicitly don't free resources here because the hardware is
@@ -4714,11 +4609,10 @@ static int iavf_close(struct net_device *netdev)
 				    msecs_to_jiffies(500));
 	if (!status)
 		netdev_warn(netdev, "Device resources not yet released\n");
+
 	netdev_lock(netdev);
-	mutex_lock(&adapter->crit_lock);
 	adapter->aq_required |= aq_to_restore;
-	mutex_unlock(&adapter->crit_lock);
+
 	return 0;
 }
@@ -5227,15 +5121,16 @@ iavf_shaper_set(struct net_shaper_binding *binding,
 	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
 	const struct net_shaper_handle *handle = &shaper->handle;
 	struct iavf_ring *tx_ring;
-	int ret = 0;
+	int ret;
+
+	netdev_assert_locked(adapter->netdev);
 
-	mutex_lock(&adapter->crit_lock);
 	if (handle->id >= adapter->num_active_queues)
-		goto unlock;
+		return 0;
 
 	ret = iavf_verify_shaper(binding, shaper, extack);
 	if (ret)
-		goto unlock;
+		return ret;
 
 	tx_ring = &adapter->tx_rings[handle->id];
@@ -5245,9 +5140,7 @@ iavf_shaper_set(struct net_shaper_binding *binding,
 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
 
-unlock:
-	mutex_unlock(&adapter->crit_lock);
-	return ret;
+	return 0;
 }
 
 static int iavf_shaper_del(struct net_shaper_binding *binding,
@@ -5257,9 +5150,10 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
 	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
 	struct iavf_ring *tx_ring;
 
-	mutex_lock(&adapter->crit_lock);
+	netdev_assert_locked(adapter->netdev);
+
 	if (handle->id >= adapter->num_active_queues)
-		goto unlock;
+		return 0;
 
 	tx_ring = &adapter->tx_rings[handle->id];
 	tx_ring->q_shaper.bw_min = 0;
@@ -5268,8 +5162,6 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
 
-unlock:
-	mutex_unlock(&adapter->crit_lock);
 	return 0;
 }
@@ -5530,10 +5422,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_alloc_qos_cap;
 	}
 
-	/* set up the locks for the AQ, do this only once in probe
-	 * and destroy them only once in remove
-	 */
-	mutex_init(&adapter->crit_lock);
 	mutex_init(&hw->aq.asq_mutex);
 	mutex_init(&hw->aq.arq_mutex);
@@ -5596,22 +5484,24 @@ static int iavf_suspend(struct device *dev_d)
 {
 	struct net_device *netdev = dev_get_drvdata(dev_d);
 	struct iavf_adapter *adapter = netdev_priv(netdev);
+	bool running;
 
 	netif_device_detach(netdev);
 
+	running = netif_running(netdev);
+	if (running)
+		rtnl_lock();
 	netdev_lock(netdev);
-	mutex_lock(&adapter->crit_lock);
 
-	if (netif_running(netdev)) {
-		rtnl_lock();
+	if (running)
 		iavf_down(adapter);
-		rtnl_unlock();
-	}
+
 	iavf_free_misc_irq(adapter);
 	iavf_reset_interrupt_capability(adapter);
 
-	mutex_unlock(&adapter->crit_lock);
 	netdev_unlock(netdev);
+	if (running)
+		rtnl_unlock();
 
 	return 0;
 }
@@ -5688,20 +5578,20 @@ static void iavf_remove(struct pci_dev *pdev)
 	 * There are flows where register/unregister netdev may race.
 	 */
 	while (1) {
-		mutex_lock(&adapter->crit_lock);
+		netdev_lock(netdev);
 		if (adapter->state == __IAVF_RUNNING ||
 		    adapter->state == __IAVF_DOWN ||
 		    adapter->state == __IAVF_INIT_FAILED) {
-			mutex_unlock(&adapter->crit_lock);
+			netdev_unlock(netdev);
 			break;
 		}
 		/* Simply return if we already went through iavf_shutdown */
 		if (adapter->state == __IAVF_REMOVE) {
-			mutex_unlock(&adapter->crit_lock);
+			netdev_unlock(netdev);
 			return;
 		}
 
-		mutex_unlock(&adapter->crit_lock);
+		netdev_unlock(netdev);
 		usleep_range(500, 1000);
 	}
 	cancel_delayed_work_sync(&adapter->watchdog_task);
@@ -5711,7 +5601,6 @@ static void iavf_remove(struct pci_dev *pdev)
 	unregister_netdev(netdev);
 
 	netdev_lock(netdev);
-	mutex_lock(&adapter->crit_lock);
 	dev_info(&adapter->pdev->dev, "Removing device\n");
 	iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5727,9 +5616,11 @@ static void iavf_remove(struct pci_dev *pdev)
 	iavf_misc_irq_disable(adapter);
 	/* Shut down all the garbage mashers on the detention level */
+	netdev_unlock(netdev);
 	cancel_work_sync(&adapter->reset_task);
 	cancel_delayed_work_sync(&adapter->watchdog_task);
 	cancel_work_sync(&adapter->adminq_task);
+	netdev_lock(netdev);
 
 	adapter->aq_required = 0;
 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
@@ -5747,8 +5638,6 @@ static void iavf_remove(struct pci_dev *pdev)
 	/* destroy the locks only once, here */
 	mutex_destroy(&hw->aq.arq_mutex);
 	mutex_destroy(&hw->aq.asq_mutex);
-	mutex_unlock(&adapter->crit_lock);
-	mutex_destroy(&adapter->crit_lock);
 	netdev_unlock(netdev);
 
 	iounmap(hw->hw_addr);
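
The shape of the iavf conversion above, distilled: the driver-private crit_lock (with its mutex_trylock()/udelay() loops) is replaced by the per-netdev instance lock from <net/netdev_lock.h>. Paths the core already calls with the lock held merely assert it; the driver's own async work takes it explicitly. A kernel-style sketch under those assumptions (the driver_* names are illustrative, not iavf functions):

#include <linux/netdevice.h>
#include <net/netdev_lock.h>

static void driver_async_work(struct net_device *netdev)
{
	netdev_lock(netdev);		/* async context: take the lock */
	/* ... reconfigure device state under the instance lock ... */
	netdev_unlock(netdev);
}

static int driver_locked_entry(struct net_device *netdev)
{
	netdev_assert_locked(netdev);	/* core holds the lock for us */
	/* ... */
	return 0;
}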

View file

@@ -2740,6 +2740,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
 	}
 }
 
+/**
+ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
+ * @vsi: the VSI with XDP rings being unmapped
+ */
+static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
+{
+	int v_idx;
+
+	ice_for_each_q_vector(vsi, v_idx) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+		struct ice_tx_ring *ring;
+
+		ice_for_each_tx_ring(ring, q_vector->tx)
+			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+				break;
+
+		/* restore the value of last node prior to XDP setup */
+		q_vector->tx.tx_ring = ring;
+	}
+}
+
 /**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
@@ -2803,7 +2824,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 	if (status) {
 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
 			status);
-		goto clear_xdp_rings;
+		goto unmap_xdp_rings;
 	}
 
 	/* assign the prog only when it's not already present on VSI;
@@ -2819,6 +2840,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 		ice_vsi_assign_bpf_prog(vsi, prog);
 
 	return 0;
+unmap_xdp_rings:
+	ice_unmap_xdp_rings(vsi);
 clear_xdp_rings:
 	ice_for_each_xdp_txq(vsi, i)
 		if (vsi->xdp_rings[i]) {
@@ -2835,6 +2858,8 @@ err_map_xdp:
 	mutex_unlock(&pf->avail_q_mutex);
 
 	devm_kfree(dev, vsi->xdp_rings);
+	vsi->xdp_rings = NULL;
+
 	return -ENOMEM;
 }
@@ -2850,7 +2875,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct ice_pf *pf = vsi->back;
-	int i, v_idx;
+	int i;
 
 	/* q_vectors are freed in reset path so there's no point in detaching
 	 * rings
@@ -2858,17 +2883,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 	if (cfg_type == ICE_XDP_CFG_PART)
 		goto free_qmap;
 
-	ice_for_each_q_vector(vsi, v_idx) {
-		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-		struct ice_tx_ring *ring;
-
-		ice_for_each_tx_ring(ring, q_vector->tx)
-			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
-				break;
-
-		/* restore the value of last node prior to XDP setup */
-		q_vector->tx.tx_ring = ring;
-	}
+	ice_unmap_xdp_rings(vsi);
 
 free_qmap:
 	mutex_lock(&pf->avail_q_mutex);
@@ -3013,11 +3028,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
 		if (xdp_ring_err) {
 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+			goto resume_if;
 		} else {
 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
 							     ICE_XDP_CFG_FULL);
-			if (xdp_ring_err)
+			if (xdp_ring_err) {
 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+				goto resume_if;
+			}
 		}
 		xdp_features_set_redirect_target(vsi->netdev, true);
 		/* reallocate Rx queues that are used for zero-copy */
@@ -3035,6 +3053,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
 	}
 
+resume_if:
 	if (if_running)
 		ret = ice_up(vsi);

View file

@@ -84,6 +84,27 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
 	return NULL;
 }

+/**
+ * ice_sched_find_next_vsi_node - find the next node for a given VSI
+ * @vsi_node: VSI support node to start search with
+ *
+ * Return: Next VSI support node, or NULL.
+ *
+ * The function returns a pointer to the next node from the VSI layer
+ * assigned to the given VSI, or NULL if there is no such a node.
+ */
+static struct ice_sched_node *
+ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node)
+{
+	unsigned int vsi_handle = vsi_node->vsi_handle;
+
+	while ((vsi_node = vsi_node->sibling) != NULL)
+		if (vsi_node->vsi_handle == vsi_handle)
+			break;
+
+	return vsi_node;
+}
+
 /**
  * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
  * @hw: pointer to the HW struct
@@ -1084,8 +1105,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
 		if (parent->num_children < max_child_nodes) {
 			new_num_nodes = max_child_nodes - parent->num_children;
 		} else {
-			/* This parent is full, try the next sibling */
-			parent = parent->sibling;
+			/* This parent is full,
+			 * try the next available sibling.
+			 */
+			parent = ice_sched_find_next_vsi_node(parent);
 			/* Don't modify the first node TEID memory if the
 			 * first node was added already in the above call.
 			 * Instead send some temp memory for all other
@@ -1528,12 +1551,23 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 	/* get the first queue group node from VSI sub-tree */
 	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
 	while (qgrp_node) {
+		struct ice_sched_node *next_vsi_node;
+
 		/* make sure the qgroup node is part of the VSI subtree */
 		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
 			if (qgrp_node->num_children < max_children &&
 			    qgrp_node->owner == owner)
 				break;
 		qgrp_node = qgrp_node->sibling;
+		if (qgrp_node)
+			continue;
+
+		next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+		if (!next_vsi_node)
+			break;
+
+		vsi_node = next_vsi_node;
+		qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
 	}

 	/* Select the best queue group */
@@ -1604,16 +1638,16 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 /**
  * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
  * @hw: pointer to the HW struct
- * @num_qs: number of queues
+ * @num_new_qs: number of new queues that will be added to the tree
  * @num_nodes: num nodes array
  *
  * This function calculates the number of VSI child nodes based on the
  * number of queues.
  */
 static void
-ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
+ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes)
 {
-	u16 num = num_qs;
+	u16 num = num_new_qs;
 	u8 i, qgl, vsil;

 	qgl = ice_sched_get_qgrp_layer(hw);
@@ -1779,7 +1813,11 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
 		if (!parent)
 			return -EIO;

-		if (i == vsil)
+		/* Do not modify the VSI handle for already existing VSI nodes,
+		 * (if no new VSI node was added to the tree).
+		 * Assign the VSI handle only to newly added VSI nodes.
+		 */
+		if (i == vsil && num_added)
 			parent->vsi_handle = vsi_handle;
 	}

@@ -1812,6 +1850,41 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
 					       num_nodes);
 }

+/**
+ * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count
+ * @hw: pointer to the HW struct
+ * @vsi_node: pointer to the leftmost VSI node that needs to be extended
+ * @new_numqs: new number of queues that has to be handled by the VSI
+ * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry
+ *
+ * This function recalculates the number of supported nodes that need to
+ * be added after adding more Tx queues for a given VSI.
+ * The number of new VSI support nodes that shall be added will be saved
+ * to the @new_num_nodes table for the VSI layer.
+ */
+static void
+ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw,
+				   struct ice_sched_node *vsi_node,
+				   unsigned int new_numqs, u16 *new_num_nodes)
+{
+	u32 vsi_nodes_cnt = 1;
+	u32 max_queue_cnt = 1;
+	u32 qgl, vsil;
+
+	qgl = ice_sched_get_qgrp_layer(hw);
+	vsil = ice_sched_get_vsi_layer(hw);
+
+	for (u32 i = vsil; i <= qgl; i++)
+		max_queue_cnt *= hw->max_children[i];
+
+	while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL)
+		vsi_nodes_cnt++;
+
+	if (new_numqs > (max_queue_cnt * vsi_nodes_cnt))
+		new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) -
+				      vsi_nodes_cnt;
+}
+
 /**
  * ice_sched_update_vsi_child_nodes - update VSI child nodes
  * @pi: port information structure
@@ -1863,15 +1936,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 			return status;
 	}

-	if (new_numqs)
-		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
-	/* Keep the max number of queue configuration all the time. Update the
-	 * tree only if number of queues > previous number of queues. This may
+	ice_sched_recalc_vsi_support_nodes(hw, vsi_node,
+					   new_numqs, new_num_nodes);
+	ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs,
+				       new_num_nodes);
+	/* Never decrease the number of queues in the tree. Update the tree
+	 * only if number of queues > previous number of queues. This may
 	 * leave some extra nodes in the tree if number of queues < previous
 	 * number but that wouldn't harm anything. Removing those extra nodes
 	 * may complicate the code if those nodes are part of SRL or
 	 * individually rate limited.
+	 * Also, add the required VSI support nodes if the existing ones cannot
+	 * handle the requested new number of queues.
 	 */
+	status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+						 new_num_nodes);
+	if (status)
+		return status;
+
 	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
 					       new_num_nodes, owner);
 	if (status)
@@ -2012,6 +2095,58 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
 	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
 }

+/**
+ * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI
+ * @pi: port information structure
+ * @vsi_node: pointer to the leftmost node of the VSI to be removed
+ * @owner: LAN or RDMA
+ * @tc: TC number
+ *
+ * Return: Zero in case of success, or -EBUSY if the VSI has leaf nodes in TC.
+ *
+ * This function removes all the VSI support nodes associated with a given VSI
+ * and its LAN or RDMA children nodes from the scheduler tree.
+ */
+static int
+ice_sched_rm_vsi_subtree(struct ice_port_info *pi,
+			 struct ice_sched_node *vsi_node, u8 owner, u8 tc)
+{
+	u16 vsi_handle = vsi_node->vsi_handle;
+	bool all_vsi_nodes_removed = true;
+	int j = 0;
+
+	while (vsi_node) {
+		struct ice_sched_node *next_vsi_node;
+
+		if (ice_sched_is_leaf_node_present(vsi_node)) {
+			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc);
+			return -EBUSY;
+		}
+		while (j < vsi_node->num_children) {
+			if (vsi_node->children[j]->owner == owner)
+				ice_free_sched_node(pi, vsi_node->children[j]);
+			else
+				j++;
+		}
+
+		next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+
+		/* remove the VSI if it has no children */
+		if (!vsi_node->num_children)
+			ice_free_sched_node(pi, vsi_node);
+		else
+			all_vsi_nodes_removed = false;
+
+		vsi_node = next_vsi_node;
+	}
+
+	/* clean up aggregator related VSI info if any */
+	if (all_vsi_nodes_removed)
+		ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+
+	return 0;
+}
+
 /**
  * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
  * @pi: port information structure
@@ -2038,7 +2173,6 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)

 	ice_for_each_traffic_class(i) {
 		struct ice_sched_node *vsi_node, *tc_node;
-		u8 j = 0;

 		tc_node = ice_sched_get_tc_node(pi, i);
 		if (!tc_node)
@@ -2048,31 +2182,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
 		if (!vsi_node)
 			continue;

-		if (ice_sched_is_leaf_node_present(vsi_node)) {
-			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
-			status = -EBUSY;
-			goto exit_sched_rm_vsi_cfg;
-		}
-
-		while (j < vsi_node->num_children) {
-			if (vsi_node->children[j]->owner == owner) {
-				ice_free_sched_node(pi, vsi_node->children[j]);
-
-				/* reset the counter again since the num
-				 * children will be updated after node removal
-				 */
-				j = 0;
-			} else {
-				j++;
-			}
-		}
-
-		/* remove the VSI if it has no children */
-		if (!vsi_node->num_children) {
-			ice_free_sched_node(pi, vsi_node);
-			vsi_ctx->sched.vsi_node[i] = NULL;
-
-			/* clean up aggregator related VSI info if any */
-			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
-		}
+		status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i);
+		if (status)
+			goto exit_sched_rm_vsi_cfg;
+
+		vsi_ctx->sched.vsi_node[i] = NULL;
+
 		if (owner == ICE_SCHED_NODE_OWNER_LAN)
 			vsi_ctx->sched.max_lanq[i] = 0;
 		else
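The support-node arithmetic above is compact enough to check in isolation: the capacity of one VSI support node is the product of max_children[] across the VSI-to-queue-group layers, and the shortfall is a DIV_ROUND_UP. A standalone sketch with made-up fan-out values (the real driver reads max_children[] from firmware capabilities):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
    	/* hypothetical tree: VSI layer fans out 8-wide, queue groups 16-wide */
    	unsigned int max_children[] = { 8, 16 };
    	unsigned int max_queue_cnt = 1;

    	for (unsigned int i = 0; i < 2; i++)
    		max_queue_cnt *= max_children[i];	/* 128 queues per VSI node */

    	unsigned int vsi_nodes_cnt = 1;		/* support nodes already present */
    	unsigned int new_numqs = 300;		/* queues requested, e.g. after XDP */

    	if (new_numqs > max_queue_cnt * vsi_nodes_cnt)
    		printf("add %u VSI support node(s)\n",
    		       DIV_ROUND_UP(new_numqs, max_queue_cnt) - vsi_nodes_cnt);
    	return 0;
    }

With these numbers, 300 queues need ceil(300/128) = 3 support nodes, so 2 are added next to the existing one.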


@@ -1801,11 +1801,19 @@ void idpf_vc_event_task(struct work_struct *work)
 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
 		return;

-	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
-	    test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
-		set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
-		idpf_init_hard_reset(adapter);
-	}
+	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
+		goto func_reset;
+
+	if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
+		goto drv_load;
+
+	return;
+
+func_reset:
+	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+drv_load:
+	set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+	idpf_init_hard_reset(adapter);
 }

 /**


@@ -362,17 +362,18 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 {
 	struct idpf_tx_offload_params offload = { };
 	struct idpf_tx_buf *first;
+	int csum, tso, needed;
 	unsigned int count;
 	__be16 protocol;
-	int csum, tso;

 	count = idpf_tx_desc_count_required(tx_q, skb);
 	if (unlikely(!count))
 		return idpf_tx_drop_skb(tx_q, skb);

-	if (idpf_tx_maybe_stop_common(tx_q,
-				      count + IDPF_TX_DESCS_PER_CACHE_LINE +
-				      IDPF_TX_DESCS_FOR_CTX)) {
+	needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX;
+	if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+				       IDPF_DESC_UNUSED(tx_q),
+				       needed, needed)) {
 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);

 		u64_stats_update_begin(&tx_q->stats_sync);


@@ -2184,6 +2184,19 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
 }

+/* Global conditions to tell whether the txq (and related resources)
+ * has room to allow the use of "size" descriptors.
+ */
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+{
+	if (IDPF_DESC_UNUSED(tx_q) < size ||
+	    IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+		IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+	    IDPF_TX_BUF_RSV_LOW(tx_q))
+		return 0;
+	return 1;
+}
+
 /**
  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
  * @tx_q: the queue to be checked
@@ -2194,29 +2207,11 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
 				     unsigned int descs_needed)
 {
-	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
-		goto out;
+	if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+				      idpf_txq_has_room(tx_q, descs_needed),
+				      1, 1))
+		return 0;

-	/* If there are too many outstanding completions expected on the
-	 * completion queue, stop the TX queue to give the device some time to
-	 * catch up
-	 */
-	if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
-		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
-		goto splitq_stop;
-
-	/* Also check for available book keeping buffers; if we are low, stop
-	 * the queue to wait for more completions
-	 */
-	if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
-		goto splitq_stop;
-
-	return 0;
-
-splitq_stop:
-	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
-out:
 	u64_stats_update_begin(&tx_q->stats_sync);
 	u64_stats_inc(&tx_q->q_stats.q_busy);
 	u64_stats_update_end(&tx_q->stats_sync);
@@ -2242,12 +2237,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
 	tx_q->next_to_use = val;

-	if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
-		u64_stats_update_begin(&tx_q->stats_sync);
-		u64_stats_inc(&tx_q->q_stats.q_busy);
-		u64_stats_update_end(&tx_q->stats_sync);
-	}
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch. (Only
 	 * applicable for weak-ordered memory model archs,
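The idpf conversion above funnels every stop decision through the generic netif_subqueue_maybe_stop() helper; for splitq, the three driver conditions are collapsed into a single 0/1 predicate and the thresholds are set to 1, so the helper stops the queue exactly when the predicate reports no room. A compilable sketch of that shape (struct txq and its fields are invented for illustration, not the driver's real layout):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct txq {
    	uint32_t unused_descs;
    	uint32_t pending_compl;
    	uint32_t compl_overflow_thresh;
    	bool buf_reserve_low;
    };

    /* all "room" conditions folded into one boolean, as idpf_txq_has_room() does */
    static int txq_has_room(const struct txq *q, uint32_t needed)
    {
    	return q->unused_descs >= needed &&
    	       q->pending_compl <= q->compl_overflow_thresh &&
    	       !q->buf_reserve_low;
    }

    int main(void)
    {
    	struct txq q = { .unused_descs = 8, .pending_compl = 0,
    			 .compl_overflow_thresh = 16, .buf_reserve_low = false };

    	/* with thresholds of 1, the generic helper stops the queue exactly
    	 * when this predicate returns 0
    	 */
    	printf("room for 4: %d, room for 16: %d\n",
    	       txq_has_room(&q, 4), txq_has_room(&q, 16));
    	return 0;
    }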


@@ -1049,12 +1049,4 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
 				      u16 cleaned_count);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);

-static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
-					     u32 needed)
-{
-	return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
-					  IDPF_DESC_UNUSED(tx_q),
-					  needed, needed);
-}
-
 #endif /* !_IDPF_TXRX_H_ */


@@ -347,7 +347,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
  * All waiting threads will be woken-up and their transaction aborted. Further
  * operations on that object will fail.
  */
-static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
 {
 	int i;


@@ -150,5 +150,6 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport);
 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);

 #endif /* _IDPF_VIRTCHNL_H_ */


@@ -1463,6 +1463,8 @@ static __maybe_unused int mtk_star_suspend(struct device *dev)
 	if (netif_running(ndev))
 		mtk_star_disable(ndev);

+	netif_device_detach(ndev);
+
 	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);

 	return 0;
@@ -1487,6 +1489,8 @@ static __maybe_unused int mtk_star_resume(struct device *dev)
 			clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
 	}

+	netif_device_attach(ndev);
+
 	return ret;
 }


@@ -249,7 +249,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
 static u32 freq_to_shift(u16 freq)
 {
 	u32 freq_khz = freq * 1000;
-	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+	u64 max_val_cycles = freq_khz * 1000ULL * MLX4_EN_WRAP_AROUND_SEC;
 	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
 	/* calculate max possible multiplier in order to fit in 64bit */
 	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
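The mlx4 one-liner is a classic C promotion fix: freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC is evaluated entirely in 32-bit arithmetic and wraps before it is widened for the u64 store; the ULL suffix forces the whole product to 64 bits. A minimal userspace demo (WRAP_AROUND_SEC here is a stand-in value, not the driver's constant):

    #include <stdint.h>
    #include <stdio.h>

    #define WRAP_AROUND_SEC 10	/* stand-in for MLX4_EN_WRAP_AROUND_SEC */

    int main(void)
    {
    	uint32_t freq_khz = 65535u * 1000u;	/* max u16 freq, in kHz */

    	/* the whole product is computed in 32 bits and wraps before the store */
    	uint64_t bad = freq_khz * 1000u * WRAP_AROUND_SEC;
    	/* 1000ULL promotes the expression to 64 bits from the start */
    	uint64_t good = freq_khz * 1000ULL * WRAP_AROUND_SEC;

    	printf("bad=%llu good=%llu\n",
    	       (unsigned long long)bad, (unsigned long long)good);
    	return 0;
    }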


@@ -879,6 +879,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
 	lan966x_vlan_port_set_vlan_aware(port, 0);
 	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
 	lan966x_vlan_port_apply(port);
+	lan966x_vlan_port_rew_host(port);

 	return 0;
 }


@@ -497,6 +497,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port);
 bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
 void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
 				      bool vlan_aware);
+void lan966x_vlan_port_rew_host(struct lan966x_port *port);
 int lan966x_vlan_port_set_vid(struct lan966x_port *port,
 			      u16 vid,
 			      bool pvid,


@@ -297,6 +297,7 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
 	lan966x_vlan_port_set_vlan_aware(port, false);
 	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
 	lan966x_vlan_port_apply(port);
+	lan966x_vlan_port_rew_host(port);
 }

 int lan966x_port_changeupper(struct net_device *dev,


@@ -149,6 +149,27 @@ void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
 	port->vlan_aware = vlan_aware;
 }

+/* When the interface is in host mode, the interface should not be vlan aware
+ * but it should insert all the tags that it gets from the network stack.
+ * The tags are not in the data of the frame but actually in the skb and the ifh
+ * is configured already to get this tag. So what we need to do is to update the
+ * rewriter to insert the vlan tag for all frames which have a vlan tag
+ * different than 0.
+ */
+void lan966x_vlan_port_rew_host(struct lan966x_port *port)
+{
+	struct lan966x *lan966x = port->lan966x;
+	u32 val;
+
+	/* Tag all frames except when VID=0 */
+	val = REW_TAG_CFG_TAG_CFG_SET(2);
+
+	/* Update only some bits in the register */
+	lan_rmw(val,
+		REW_TAG_CFG_TAG_CFG,
+		lan966x, REW_TAG_CFG(port->chip_port));
+}
+
 void lan966x_vlan_port_apply(struct lan966x_port *port)
 {
 	struct lan966x *lan966x = port->lan966x;


@@ -32,6 +32,11 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
 	int i, ret = 0;
 	u32 ctrl;

+	if (!ptp_rate) {
+		netdev_warn(priv->dev, "Invalid PTP rate");
+		return -EINVAL;
+	}
+
 	ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
 	ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
 	ret |= est_write(est_addr, EST_TER, cfg->ter, false);


@@ -805,6 +805,11 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
 		return -EOPNOTSUPP;

+	if (!priv->plat->clk_ptp_rate) {
+		netdev_err(priv->dev, "Invalid PTP clock rate");
+		return -EINVAL;
+	}
+
 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
 	priv->systime_flags = systime_flags;


@@ -430,6 +430,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
 	struct device_node *np = pdev->dev.of_node;
 	struct plat_stmmacenet_data *plat;
 	struct stmmac_dma_cfg *dma_cfg;
+	static int bus_id = -ENODEV;
 	int phy_mode;
 	void *ret;
 	int rc;
@@ -465,8 +466,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
 	of_property_read_u32(np, "max-speed", &plat->max_speed);

 	plat->bus_id = of_alias_get_id(np, "ethernet");
-	if (plat->bus_id < 0)
-		plat->bus_id = 0;
+	if (plat->bus_id < 0) {
+		if (bus_id < 0)
+			bus_id = of_alias_get_highest_id("ethernet");
+		/* No ethernet alias found, init at -1 so first bus_id is 0 */
+		if (bus_id < 0)
+			bus_id = -1;
+		plat->bus_id = ++bus_id;
+	}

 	/* Default to phy auto-detection */
 	plat->phy_addr = -1;


@@ -317,7 +317,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
 	/* Calculate the clock domain crossing (CDC) error if necessary */
 	priv->plat->cdc_error_adj = 0;
-	if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+	if (priv->plat->has_gmac4)
 		priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;

 	/* Update the ptp clock parameters based on feature discovery, when


@@ -28,6 +28,14 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
 	spin_lock(&prueth->stats_lock);

 	for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
+		/* In MII mode TX lines are swapped inside ICSSG, so read Tx stats
+		 * from slice1 for port0 and slice0 for port1 to get accurate Tx
+		 * stats for a given port
+		 */
+		if (emac->phy_if == PHY_INTERFACE_MODE_MII &&
+		    icssg_all_miig_stats[i].offset >= ICSSG_TX_PACKET_OFFSET &&
+		    icssg_all_miig_stats[i].offset <= ICSSG_TX_BYTE_OFFSET)
+			base = stats_base[slice ^ 1];
 		regmap_read(prueth->miig_rt,
 			    base + icssg_all_miig_stats[i].offset,
 			    &val);


@@ -183,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
 	xdp.command = XDP_SETUP_PROG;
 	xdp.prog = prog;

-	ret = dev_xdp_propagate(vf_netdev, &xdp);
+	ret = netif_xdp_propagate(vf_netdev, &xdp);

 	if (ret && prog)
 		bpf_prog_put(prog);


@@ -2462,8 +2462,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)

 	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

-	netvsc_vf_setxdp(vf_netdev, NULL);
-
 	reinit_completion(&net_device_ctx->vf_add);
 	netdev_rx_handler_unregister(vf_netdev);
 	netdev_upper_dev_unlink(vf_netdev, ndev);
@@ -2631,7 +2629,9 @@ static int netvsc_probe(struct hv_device *dev,
 			continue;

 		netvsc_prepare_bonding(vf_netdev);
+		netdev_lock_ops(vf_netdev);
 		netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
+		netdev_unlock_ops(vf_netdev);
 		__netvsc_vf_setup(net, vf_netdev);
 		break;
 	}


@@ -134,7 +134,7 @@ void ovpn_decrypt_post(void *data, int ret)

 		rcu_read_lock();
 		sock = rcu_dereference(peer->sock);
-		if (sock && sock->sock->sk->sk_protocol == IPPROTO_UDP)
+		if (sock && sock->sk->sk_protocol == IPPROTO_UDP)
 			/* check if this peer changed local or remote endpoint */
 			ovpn_peer_endpoints_update(peer, skb);
 		rcu_read_unlock();
@@ -270,12 +270,12 @@ void ovpn_encrypt_post(void *data, int ret)
 	if (unlikely(!sock))
 		goto err_unlock;

-	switch (sock->sock->sk->sk_protocol) {
+	switch (sock->sk->sk_protocol) {
 	case IPPROTO_UDP:
-		ovpn_udp_send_skb(peer, sock->sock, skb);
+		ovpn_udp_send_skb(peer, sock->sk, skb);
 		break;
 	case IPPROTO_TCP:
-		ovpn_tcp_send_skb(peer, sock->sock, skb);
+		ovpn_tcp_send_skb(peer, sock->sk, skb);
 		break;
 	default:
 		/* no transport configured yet */


@@ -501,7 +501,7 @@ int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info)
 	/* when using a TCP socket the remote IP is not expected */
 	rcu_read_lock();
 	sock = rcu_dereference(peer->sock);
-	if (sock && sock->sock->sk->sk_protocol == IPPROTO_TCP &&
+	if (sock && sock->sk->sk_protocol == IPPROTO_TCP &&
 	    (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
 	     attrs[OVPN_A_PEER_REMOTE_IPV6])) {
 		rcu_read_unlock();
@@ -559,14 +559,14 @@ static int ovpn_nl_send_peer(struct sk_buff *skb, const struct genl_info *info,
 		goto err_unlock;
 	}

-	if (!net_eq(genl_info_net(info), sock_net(sock->sock->sk))) {
+	if (!net_eq(genl_info_net(info), sock_net(sock->sk))) {
 		id = peernet2id_alloc(genl_info_net(info),
-				      sock_net(sock->sock->sk),
+				      sock_net(sock->sk),
 				      GFP_ATOMIC);
 		if (nla_put_s32(skb, OVPN_A_PEER_SOCKET_NETNSID, id))
 			goto err_unlock;
 	}

-	local_port = inet_sk(sock->sock->sk)->inet_sport;
+	local_port = inet_sk(sock->sk)->inet_sport;
 	rcu_read_unlock();

 	if (nla_put_u32(skb, OVPN_A_PEER_ID, peer->id))
@@ -1153,8 +1153,8 @@ int ovpn_nl_peer_del_notify(struct ovpn_peer *peer)
 		ret = -EINVAL;
 		goto err_unlock;
 	}
-	genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk),
-				msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
+	genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sk), msg, 0,
+				OVPN_NLGRP_PEERS, GFP_ATOMIC);
 	rcu_read_unlock();

 	return 0;
@@ -1218,8 +1218,8 @@ int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id)
 		ret = -EINVAL;
 		goto err_unlock;
 	}
-	genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk),
-				msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
+	genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sk), msg, 0,
+				OVPN_NLGRP_PEERS, GFP_ATOMIC);
 	rcu_read_unlock();

 	return 0;


@@ -1145,7 +1145,7 @@ static void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk,

 	if (sk) {
 		ovpn_sock = rcu_access_pointer(peer->sock);
-		if (!ovpn_sock || ovpn_sock->sock->sk != sk) {
+		if (!ovpn_sock || ovpn_sock->sk != sk) {
 			spin_unlock_bh(&ovpn->lock);
 			ovpn_peer_put(peer);
 			return;
@@ -1175,7 +1175,7 @@ static void ovpn_peers_release_mp(struct ovpn_priv *ovpn, struct sock *sk,
 		if (sk) {
 			rcu_read_lock();
 			ovpn_sock = rcu_dereference(peer->sock);
-			remove = ovpn_sock && ovpn_sock->sock->sk == sk;
+			remove = ovpn_sock && ovpn_sock->sk == sk;
 			rcu_read_unlock();
 		}


@@ -24,9 +24,9 @@ static void ovpn_socket_release_kref(struct kref *kref)
 	struct ovpn_socket *sock = container_of(kref, struct ovpn_socket,
 						refcount);

-	if (sock->sock->sk->sk_protocol == IPPROTO_UDP)
+	if (sock->sk->sk_protocol == IPPROTO_UDP)
 		ovpn_udp_socket_detach(sock);
-	else if (sock->sock->sk->sk_protocol == IPPROTO_TCP)
+	else if (sock->sk->sk_protocol == IPPROTO_TCP)
 		ovpn_tcp_socket_detach(sock);
 }

@@ -75,14 +75,6 @@ void ovpn_socket_release(struct ovpn_peer *peer)
 	if (!sock)
 		return;

-	/* sanity check: we should not end up here if the socket
-	 * was already closed
-	 */
-	if (!sock->sock->sk) {
-		DEBUG_NET_WARN_ON_ONCE(1);
-		return;
-	}
-
 	/* Drop the reference while holding the sock lock to avoid
 	 * concurrent ovpn_socket_new call to mess up with a partially
 	 * detached socket.
@@ -90,22 +82,24 @@ void ovpn_socket_release(struct ovpn_peer *peer)
 	 * Holding the lock ensures that a socket with refcnt 0 is fully
 	 * detached before it can be picked by a concurrent reader.
 	 */
-	lock_sock(sock->sock->sk);
+	lock_sock(sock->sk);
 	released = ovpn_socket_put(peer, sock);
-	release_sock(sock->sock->sk);
+	release_sock(sock->sk);

 	/* align all readers with sk_user_data being NULL */
 	synchronize_rcu();

 	/* following cleanup should happen with lock released */
 	if (released) {
-		if (sock->sock->sk->sk_protocol == IPPROTO_UDP) {
+		if (sock->sk->sk_protocol == IPPROTO_UDP) {
 			netdev_put(sock->ovpn->dev, &sock->dev_tracker);
-		} else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) {
+		} else if (sock->sk->sk_protocol == IPPROTO_TCP) {
 			/* wait for TCP jobs to terminate */
 			ovpn_tcp_socket_wait_finish(sock);
 			ovpn_peer_put(sock->peer);
 		}
+
+		/* drop reference acquired in ovpn_socket_new() */
+		sock_put(sock->sk);
+
 		/* we can call plain kfree() because we already waited one RCU
 		 * period due to synchronize_rcu()
 		 */
@@ -118,12 +112,14 @@ static bool ovpn_socket_hold(struct ovpn_socket *sock)
 	return kref_get_unless_zero(&sock->refcount);
 }

-static int ovpn_socket_attach(struct ovpn_socket *sock, struct ovpn_peer *peer)
+static int ovpn_socket_attach(struct ovpn_socket *ovpn_sock,
+			      struct socket *sock,
+			      struct ovpn_peer *peer)
 {
-	if (sock->sock->sk->sk_protocol == IPPROTO_UDP)
-		return ovpn_udp_socket_attach(sock, peer->ovpn);
-	else if (sock->sock->sk->sk_protocol == IPPROTO_TCP)
-		return ovpn_tcp_socket_attach(sock, peer);
+	if (sock->sk->sk_protocol == IPPROTO_UDP)
+		return ovpn_udp_socket_attach(ovpn_sock, sock, peer->ovpn);
+	else if (sock->sk->sk_protocol == IPPROTO_TCP)
+		return ovpn_tcp_socket_attach(ovpn_sock, peer);

 	return -EOPNOTSUPP;
 }
@@ -138,14 +134,15 @@ static int ovpn_socket_attach(struct ovpn_socket *sock, struct ovpn_peer *peer)
 struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
 {
 	struct ovpn_socket *ovpn_sock;
+	struct sock *sk = sock->sk;
 	int ret;

-	lock_sock(sock->sk);
+	lock_sock(sk);

 	/* a TCP socket can only be owned by a single peer, therefore there
 	 * can't be any other user
 	 */
-	if (sock->sk->sk_protocol == IPPROTO_TCP && sock->sk->sk_user_data) {
+	if (sk->sk_protocol == IPPROTO_TCP && sk->sk_user_data) {
 		ovpn_sock = ERR_PTR(-EBUSY);
 		goto sock_release;
 	}
@@ -153,8 +150,8 @@ struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
 	/* a UDP socket can be shared across multiple peers, but we must make
 	 * sure it is not owned by something else
 	 */
-	if (sock->sk->sk_protocol == IPPROTO_UDP) {
-		u8 type = READ_ONCE(udp_sk(sock->sk)->encap_type);
+	if (sk->sk_protocol == IPPROTO_UDP) {
+		u8 type = READ_ONCE(udp_sk(sk)->encap_type);

 		/* socket owned by other encapsulation module */
 		if (type && type != UDP_ENCAP_OVPNINUDP) {
@@ -163,7 +160,7 @@ struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
 	}

 	rcu_read_lock();
-	ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
+	ovpn_sock = rcu_dereference_sk_user_data(sk);
 	if (ovpn_sock) {
 		/* socket owned by another ovpn instance, we can't use it */
 		if (ovpn_sock->ovpn != peer->ovpn) {
@@ -200,11 +197,22 @@ struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
 		goto sock_release;
 	}

-	ovpn_sock->sock = sock;
+	ovpn_sock->sk = sk;
 	kref_init(&ovpn_sock->refcount);

-	ret = ovpn_socket_attach(ovpn_sock, peer);
+	/* the newly created ovpn_socket is holding reference to sk,
+	 * therefore we increase its refcounter.
+	 *
+	 * This ovpn_socket instance is referenced by all peers
+	 * using the same socket.
+	 *
+	 * ovpn_socket_release() will take care of dropping the reference.
+	 */
+	sock_hold(sk);
+
+	ret = ovpn_socket_attach(ovpn_sock, sock, peer);
 	if (ret < 0) {
+		sock_put(sk);
 		kfree(ovpn_sock);
 		ovpn_sock = ERR_PTR(ret);
 		goto sock_release;
@@ -213,11 +221,11 @@ struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
 	/* TCP sockets are per-peer, therefore they are linked to their unique
 	 * peer
 	 */
-	if (sock->sk->sk_protocol == IPPROTO_TCP) {
+	if (sk->sk_protocol == IPPROTO_TCP) {
 		INIT_WORK(&ovpn_sock->tcp_tx_work, ovpn_tcp_tx_work);
 		ovpn_sock->peer = peer;
 		ovpn_peer_hold(peer);
-	} else if (sock->sk->sk_protocol == IPPROTO_UDP) {
+	} else if (sk->sk_protocol == IPPROTO_UDP) {
 		/* in UDP we only link the ovpn instance since the socket is
 		 * shared among multiple peers
 		 */
@@ -226,8 +234,8 @@ struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
 				   GFP_KERNEL);
 	}

-	rcu_assign_sk_user_data(sock->sk, ovpn_sock);
+	rcu_assign_sk_user_data(sk, ovpn_sock);

 sock_release:
-	release_sock(sock->sk);
+	release_sock(sk);
 	return ovpn_sock;
 }
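The ovpn_socket_new() change pins the struct sock with sock_hold() for the wrapper's whole lifetime; both the attach error path and ovpn_socket_release() drop that reference with sock_put(). A userspace analog of the pairing, with a toy counter standing in for the sk refcounter:

    #include <stdio.h>
    #include <stdlib.h>

    struct sk {
    	int refs;
    };

    static void sk_hold(struct sk *s) { s->refs++; }

    static void sk_put(struct sk *s)
    {
    	if (--s->refs == 0) {
    		printf("sk freed\n");
    		free(s);
    	}
    }

    struct wrapper {
    	struct sk *sk;
    };

    static struct wrapper *wrapper_new(struct sk *s, int fail_attach)
    {
    	struct wrapper *w = malloc(sizeof(*w));

    	if (!w)
    		return NULL;
    	w->sk = s;
    	sk_hold(s);			/* reference owned by the wrapper */
    	if (fail_attach) {		/* error path must drop it again */
    		sk_put(s);
    		free(w);
    		return NULL;
    	}
    	return w;
    }

    static void wrapper_release(struct wrapper *w)
    {
    	sk_put(w->sk);			/* drop the reference taken above */
    	free(w);
    }

    int main(void)
    {
    	struct sk *s = malloc(sizeof(*s));

    	s->refs = 1;			/* caller's own reference */
    	wrapper_release(wrapper_new(s, 0));
    	sk_put(s);			/* last put frees the object */
    	return 0;
    }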


@@ -22,7 +22,7 @@ struct ovpn_peer;
  * @ovpn: ovpn instance owning this socket (UDP only)
  * @dev_tracker: reference tracker for associated dev (UDP only)
  * @peer: unique peer transmitting over this socket (TCP only)
- * @sock: the low level sock object
+ * @sk: the low level sock object
  * @refcount: amount of contexts currently referencing this object
  * @work: member used to schedule release routine (it may block)
  * @tcp_tx_work: work for deferring outgoing packet processing (TCP only)
@@ -36,7 +36,7 @@ struct ovpn_socket {
 		struct ovpn_peer *peer;
 	};

-	struct socket *sock;
+	struct sock *sk;
 	struct kref refcount;
 	struct work_struct work;
 	struct work_struct tcp_tx_work;


@@ -124,14 +124,18 @@ static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb)
 	 * this peer, therefore ovpn_peer_hold() is not expected to fail
 	 */
 	if (WARN_ON(!ovpn_peer_hold(peer)))
-		goto err;
+		goto err_nopeer;

 	ovpn_recv(peer, skb);
 	return;
 err:
+	/* take reference for deferred peer deletion. should never fail */
+	if (WARN_ON(!ovpn_peer_hold(peer)))
+		goto err_nopeer;
+	schedule_work(&peer->tcp.defer_del_work);
 	dev_dstats_rx_dropped(peer->ovpn->dev);
+err_nopeer:
 	kfree_skb(skb);
-	ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR);
 }

 static int ovpn_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
@@ -186,18 +190,18 @@ out:
 void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock)
 {
 	struct ovpn_peer *peer = ovpn_sock->peer;
-	struct socket *sock = ovpn_sock->sock;
+	struct sock *sk = ovpn_sock->sk;

 	strp_stop(&peer->tcp.strp);
 	skb_queue_purge(&peer->tcp.user_queue);

 	/* restore CBs that were saved in ovpn_sock_set_tcp_cb() */
-	sock->sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready;
-	sock->sk->sk_write_space = peer->tcp.sk_cb.sk_write_space;
-	sock->sk->sk_prot = peer->tcp.sk_cb.prot;
-	sock->sk->sk_socket->ops = peer->tcp.sk_cb.ops;
+	sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready;
+	sk->sk_write_space = peer->tcp.sk_cb.sk_write_space;
+	sk->sk_prot = peer->tcp.sk_cb.prot;
+	sk->sk_socket->ops = peer->tcp.sk_cb.ops;

-	rcu_assign_sk_user_data(sock->sk, NULL);
+	rcu_assign_sk_user_data(sk, NULL);
 }

 void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock)
@@ -283,10 +287,10 @@ void ovpn_tcp_tx_work(struct work_struct *work)

 	sock = container_of(work, struct ovpn_socket, tcp_tx_work);
-	lock_sock(sock->sock->sk);
+	lock_sock(sock->sk);
 	if (sock->peer)
-		ovpn_tcp_send_sock(sock->peer, sock->sock->sk);
-	release_sock(sock->sock->sk);
+		ovpn_tcp_send_sock(sock->peer, sock->sk);
+	release_sock(sock->sk);
 }

 static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk,
@@ -307,15 +311,15 @@ static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk,
 	ovpn_tcp_send_sock(peer, sk);
 }

-void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
 		       struct sk_buff *skb)
 {
 	u16 len = skb->len;

 	*(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len);

-	spin_lock_nested(&sock->sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
-	if (sock_owned_by_user(sock->sk)) {
+	spin_lock_nested(&sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
+	if (sock_owned_by_user(sk)) {
 		if (skb_queue_len(&peer->tcp.out_queue) >=
 		    READ_ONCE(net_hotdata.max_backlog)) {
 			dev_dstats_tx_dropped(peer->ovpn->dev);
@@ -324,10 +328,10 @@ void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
 		}
 		__skb_queue_tail(&peer->tcp.out_queue, skb);
 	} else {
-		ovpn_tcp_send_sock_skb(peer, sock->sk, skb);
+		ovpn_tcp_send_sock_skb(peer, sk, skb);
 	}
 unlock:
-	spin_unlock(&sock->sk->sk_lock.slock);
+	spin_unlock(&sk->sk_lock.slock);
 }

 static void ovpn_tcp_release(struct sock *sk)
@@ -474,7 +478,6 @@ static void ovpn_tcp_peer_del_work(struct work_struct *work)
 int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
 			   struct ovpn_peer *peer)
 {
-	struct socket *sock = ovpn_sock->sock;
 	struct strp_callbacks cb = {
 		.rcv_msg = ovpn_tcp_rcv,
 		.parse_msg = ovpn_tcp_parse,
@@ -482,20 +485,20 @@ int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
 	int ret;

 	/* make sure no pre-existing encapsulation handler exists */
-	if (sock->sk->sk_user_data)
+	if (ovpn_sock->sk->sk_user_data)
 		return -EBUSY;

 	/* only a fully connected socket is expected. Connection should be
 	 * handled in userspace
 	 */
-	if (sock->sk->sk_state != TCP_ESTABLISHED) {
+	if (ovpn_sock->sk->sk_state != TCP_ESTABLISHED) {
 		net_err_ratelimited("%s: provided TCP socket is not in ESTABLISHED state: %d\n",
 				    netdev_name(peer->ovpn->dev),
-				    sock->sk->sk_state);
+				    ovpn_sock->sk->sk_state);
 		return -EINVAL;
 	}

-	ret = strp_init(&peer->tcp.strp, sock->sk, &cb);
+	ret = strp_init(&peer->tcp.strp, ovpn_sock->sk, &cb);
 	if (ret < 0) {
 		DEBUG_NET_WARN_ON_ONCE(1);
 		return ret;
@@ -503,31 +506,31 @@ int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,

 	INIT_WORK(&peer->tcp.defer_del_work, ovpn_tcp_peer_del_work);

-	__sk_dst_reset(sock->sk);
+	__sk_dst_reset(ovpn_sock->sk);
 	skb_queue_head_init(&peer->tcp.user_queue);
 	skb_queue_head_init(&peer->tcp.out_queue);

 	/* save current CBs so that they can be restored upon socket release */
-	peer->tcp.sk_cb.sk_data_ready = sock->sk->sk_data_ready;
-	peer->tcp.sk_cb.sk_write_space = sock->sk->sk_write_space;
-	peer->tcp.sk_cb.prot = sock->sk->sk_prot;
-	peer->tcp.sk_cb.ops = sock->sk->sk_socket->ops;
+	peer->tcp.sk_cb.sk_data_ready = ovpn_sock->sk->sk_data_ready;
+	peer->tcp.sk_cb.sk_write_space = ovpn_sock->sk->sk_write_space;
+	peer->tcp.sk_cb.prot = ovpn_sock->sk->sk_prot;
+	peer->tcp.sk_cb.ops = ovpn_sock->sk->sk_socket->ops;

 	/* assign our static CBs and prot/ops */
-	sock->sk->sk_data_ready = ovpn_tcp_data_ready;
-	sock->sk->sk_write_space = ovpn_tcp_write_space;
+	ovpn_sock->sk->sk_data_ready = ovpn_tcp_data_ready;
+	ovpn_sock->sk->sk_write_space = ovpn_tcp_write_space;

-	if (sock->sk->sk_family == AF_INET) {
-		sock->sk->sk_prot = &ovpn_tcp_prot;
-		sock->sk->sk_socket->ops = &ovpn_tcp_ops;
+	if (ovpn_sock->sk->sk_family == AF_INET) {
+		ovpn_sock->sk->sk_prot = &ovpn_tcp_prot;
+		ovpn_sock->sk->sk_socket->ops = &ovpn_tcp_ops;
 	} else {
-		sock->sk->sk_prot = &ovpn_tcp6_prot;
-		sock->sk->sk_socket->ops = &ovpn_tcp6_ops;
+		ovpn_sock->sk->sk_prot = &ovpn_tcp6_prot;
+		ovpn_sock->sk->sk_socket->ops = &ovpn_tcp6_ops;
 	}

 	/* avoid using task_frag */
-	sock->sk->sk_allocation = GFP_ATOMIC;
-	sock->sk->sk_use_task_frag = false;
+	ovpn_sock->sk->sk_allocation = GFP_ATOMIC;
+	ovpn_sock->sk->sk_use_task_frag = false;

 	/* enqueue the RX worker */
 	strp_check_rcv(&peer->tcp.strp);


@@ -30,7 +30,8 @@ void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock);
  * Required by the OpenVPN protocol in order to extract packets from
  * the TCP stream on the receiver side.
  */
-void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock, struct sk_buff *skb);
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
+		       struct sk_buff *skb);
 void ovpn_tcp_tx_work(struct work_struct *work);

 #endif /* _NET_OVPN_TCP_H_ */


@@ -43,7 +43,7 @@ static struct ovpn_socket *ovpn_socket_from_udp_sock(struct sock *sk)
 		return NULL;

 	/* make sure that sk matches our stored transport socket */
-	if (unlikely(!ovpn_sock->sock || sk != ovpn_sock->sock->sk))
+	if (unlikely(!ovpn_sock->sk || sk != ovpn_sock->sk))
 		return NULL;

 	return ovpn_sock;
@@ -335,32 +335,22 @@ out:
 /**
  * ovpn_udp_send_skb - prepare skb and send it over via UDP
  * @peer: the destination peer
- * @sock: the RCU protected peer socket
+ * @sk: peer socket
  * @skb: the packet to send
  */
-void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk,
 		       struct sk_buff *skb)
 {
-	int ret = -1;
+	int ret;

 	skb->dev = peer->ovpn->dev;
 	/* no checksum performed at this layer */
 	skb->ip_summed = CHECKSUM_NONE;

-	/* get socket info */
-	if (unlikely(!sock)) {
-		net_warn_ratelimited("%s: no sock for remote peer %u\n",
-				     netdev_name(peer->ovpn->dev), peer->id);
-		goto out;
-	}
-
 	/* crypto layer -> transport (UDP) */
-	ret = ovpn_udp_output(peer, &peer->dst_cache, sock->sk, skb);
-out:
-	if (unlikely(ret < 0)) {
+	ret = ovpn_udp_output(peer, &peer->dst_cache, sk, skb);
+	if (unlikely(ret < 0))
 		kfree_skb(skb);
-		return;
-	}
 }

 static void ovpn_udp_encap_destroy(struct sock *sk)
@@ -383,6 +373,7 @@ static void ovpn_udp_encap_destroy(struct sock *sk)
 /**
  * ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn
  * @ovpn_sock: socket to configure
+ * @sock: the socket container to be passed to setup_udp_tunnel_sock()
  * @ovpn: the openvpn instance to link
  *
  * After invoking this function, the sock will be controlled by ovpn so that
@@ -390,7 +381,7 @@ static void ovpn_udp_encap_destroy(struct sock *sk)
  *
  * Return: 0 on success or a negative error code otherwise
  */
-int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, struct socket *sock,
 			   struct ovpn_priv *ovpn)
 {
 	struct udp_tunnel_sock_cfg cfg = {
@@ -398,17 +389,16 @@ int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
 		.encap_rcv = ovpn_udp_encap_recv,
 		.encap_destroy = ovpn_udp_encap_destroy,
 	};
-	struct socket *sock = ovpn_sock->sock;
 	struct ovpn_socket *old_data;
 	int ret;

 	/* make sure no pre-existing encapsulation handler exists */
 	rcu_read_lock();
-	old_data = rcu_dereference_sk_user_data(sock->sk);
+	old_data = rcu_dereference_sk_user_data(ovpn_sock->sk);
 	if (!old_data) {
 		/* socket is currently unused - we can take it */
 		rcu_read_unlock();
-		setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg);
+		setup_udp_tunnel_sock(sock_net(ovpn_sock->sk), sock, &cfg);
 		return 0;
 	}

@@ -421,7 +411,7 @@ int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
 	 * Unlikely TCP, a single UDP socket can be used to talk to many remote
 	 * hosts and therefore openvpn instantiates one only for all its peers
 	 */
-	if ((READ_ONCE(udp_sk(sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
+	if ((READ_ONCE(udp_sk(ovpn_sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
 	    old_data->ovpn == ovpn) {
 		netdev_dbg(ovpn->dev,
 			   "provided socket already owned by this interface\n");
@@ -442,8 +432,16 @@ int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
  */
 void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock)
 {
-	struct udp_tunnel_sock_cfg cfg = { };
+	struct sock *sk = ovpn_sock->sk;

-	setup_udp_tunnel_sock(sock_net(ovpn_sock->sock->sk), ovpn_sock->sock,
-			      &cfg);
+	/* Re-enable multicast loopback */
+	inet_set_bit(MC_LOOP, sk);
+	/* Disable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
+	inet_dec_convert_csum(sk);
+
+	WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+	WRITE_ONCE(udp_sk(sk)->encap_rcv, NULL);
+	WRITE_ONCE(udp_sk(sk)->encap_destroy, NULL);
+
+	rcu_assign_sk_user_data(sk, NULL);
 }


@@ -15,11 +15,11 @@ struct ovpn_peer;
 struct ovpn_priv;
 struct socket;

-int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, struct socket *sock,
 			   struct ovpn_priv *ovpn);
 void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock);
-void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk,
 		       struct sk_buff *skb);

 #endif /* _NET_OVPN_UDP_H_ */


@@ -31,11 +31,11 @@ static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
 			      USB_RECIP_DEVICE, value, index, data, size);

 	if (unlikely(ret < size)) {
+		ret = ret < 0 ? ret : -ENODATA;
 		netdev_warn(dev->net,
 			    "Failed to read(0x%x) reg index 0x%04x: %d\n",
 			    cmd, index, ret);
-		ret = ret < 0 ? ret : -ENODATA;
 	}

 	return ret;
@@ -50,11 +50,11 @@ static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value,
 			   USB_RECIP_DEVICE, value, index, data, size);

 	if (unlikely(ret < size)) {
+		ret = ret < 0 ? ret : -ENODATA;
 		netdev_warn(dev->net,
 			    "Failed to read(0x%x) reg index 0x%04x: %d\n",
 			    cmd, index, ret);
-		ret = ret < 0 ? ret : -ENODATA;
 	}

 	return ret;


@@ -178,6 +178,7 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
 {
 	struct usbnet *dev = netdev_priv(netdev);
 	unsigned char buff[2];
+	int ret;

 	netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n",
 		   __func__, phy_id, loc);
@@ -185,8 +186,10 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
 	if (phy_id != 0)
 		return -ENODEV;

-	control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
-		     CONTROL_TIMEOUT_MS);
+	ret = control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
+			   CONTROL_TIMEOUT_MS);
+	if (ret < 0)
+		return ret;

 	return (buff[0] | buff[1] << 8);
 }


@@ -1572,6 +1572,30 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
 	return (hlen + (hdr.tcp->doff << 2));
 }

+static void
+vmxnet3_lro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+	struct udphdr *uh = NULL;
+
+	if (ip_proto == htons(ETH_P_IP)) {
+		struct iphdr *iph = (struct iphdr *)skb->data;
+
+		if (iph->protocol == IPPROTO_UDP)
+			uh = (struct udphdr *)(iph + 1);
+	} else {
+		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+		if (iph->nexthdr == IPPROTO_UDP)
+			uh = (struct udphdr *)(iph + 1);
+	}
+
+	if (uh) {
+		if (uh->check)
+			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+		else
+			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+	}
+}
+
 static int
 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		       struct vmxnet3_adapter *adapter, int quota)
@@ -1885,6 +1909,8 @@ sop_done:
 				if (segCnt != 0 && mss != 0) {
 					skb_shinfo(skb)->gso_type = rcd->v4 ?
 						SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+					if (encap_lro)
+						vmxnet3_lro_tunnel(skb, skb->protocol);
 					skb_shinfo(skb)->gso_size = mss;
 					skb_shinfo(skb)->gso_segs = segCnt;
 				} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
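vmxnet3_lro_tunnel() above picks the GSO flavor from the outer UDP checksum: a nonzero checksum field means resegmented packets must carry a recomputed outer checksum (SKB_GSO_UDP_TUNNEL_CSUM), otherwise plain SKB_GSO_UDP_TUNNEL suffices. The same decision in a standalone sketch (flag values and the header struct are invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    /* stand-ins for the kernel GSO flags, values invented for the demo */
    #define GSO_UDP_TUNNEL		0x1
    #define GSO_UDP_TUNNEL_CSUM	0x2

    struct udp_hdr { uint16_t source, dest, len, check; };

    /* mirrors the patch's decision on the outer UDP header */
    static unsigned int tunnel_gso_type(const struct udp_hdr *uh)
    {
    	return uh->check ? GSO_UDP_TUNNEL_CSUM : GSO_UDP_TUNNEL;
    }

    int main(void)
    {
    	struct udp_hdr with = { .check = 0xbeef }, without = { .check = 0 };

    	printf("%x %x\n", tunnel_gso_type(&with), tunnel_gso_type(&without));
    	return 0;
    }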


@@ -366,6 +366,7 @@ static int wg_newlink(struct net_device *dev,
 	if (ret < 0)
 		goto err_free_handshake_queue;

+	dev_set_threaded(dev, true);
 	ret = register_netdevice(dev);
 	if (ret < 0)
 		goto err_uninit_ratelimiter;


@@ -349,10 +349,6 @@ int iwl_mld_load_fw(struct iwl_mld *mld)
 	if (ret)
 		goto err;

-	ret = iwl_mld_init_mcc(mld);
-	if (ret)
-		goto err;
-
 	mld->fw_status.running = true;

 	return 0;
@@ -546,6 +542,10 @@ int iwl_mld_start_fw(struct iwl_mld *mld)
 	if (ret)
 		goto error;

+	ret = iwl_mld_init_mcc(mld);
+	if (ret)
+		goto error;
+
 	return 0;

 error:


@@ -653,7 +653,8 @@ iwl_mld_nic_error(struct iwl_op_mode *op_mode,
 	 * It might not actually be true that we'll restart, but the
 	 * setting doesn't matter if we're going to be unbound either.
 	 */
-	if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
+	if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT &&
+	    mld->fw_status.running)
 		mld->fw_status.in_hw_restart = true;
 }


@@ -6360,8 +6360,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 		(struct iwl_mvm_internal_rxq_notif *)cmd->payload;
 	struct iwl_host_cmd hcmd = {
 		.id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
-		.data[0] = &cmd,
-		.len[0] = sizeof(cmd),
+		.data[0] = cmd,
+		.len[0] = __struct_size(cmd),
 		.data[1] = data,
 		.len[1] = size,
 		.flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),


@@ -125,7 +125,7 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
 			reset_done =
 				inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE;
 		} else {
-			inta_hw = iwl_read32(trans, CSR_INT_MASK);
+			inta_hw = iwl_read32(trans, CSR_INT);
 			reset_done = inta_hw & CSR_INT_BIT_RESET_DONE;
 		}

@@ -550,8 +550,8 @@ static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
 	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
 	struct mhi_mbim_context *mbim = ctxt;
 
-	link->session = if_id;
 	link->mbim = mbim;
+	link->session = mhi_mbim_get_link_mux_id(link->mbim->mdev->mhi_cntrl) + if_id;
 	link->ndev = ndev;
 	u64_stats_init(&link->rx_syncp);
 	u64_stats_init(&link->tx_syncp);
@@ -607,7 +607,7 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
 {
 	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_mbim_context *mbim;
-	int err, link_id;
+	int err;
 
 	mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
 	if (!mbim)
@@ -628,11 +628,8 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
 	/* Number of transfer descriptors determines size of the queue */
 	mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
 
-	/* Get the corresponding mux_id from mhi */
-	link_id = mhi_mbim_get_link_mux_id(cntrl);
-
 	/* Register wwan link ops with MHI controller representing WWAN instance */
-	return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, link_id);
+	return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
 }
 
 static void mhi_mbim_remove(struct mhi_device *mhi_dev)

@@ -302,7 +302,7 @@ static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id
 	ccmni->ctlb = ctlb;
 	ccmni->dev = dev;
 	atomic_set(&ccmni->usage, 0);
-	ctlb->ccmni_inst[if_id] = ccmni;
+	WRITE_ONCE(ctlb->ccmni_inst[if_id], ccmni);
 
 	ret = register_netdevice(dev);
 	if (ret)
@@ -324,6 +324,7 @@ static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct l
 	if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
 		return;
 
+	WRITE_ONCE(ctlb->ccmni_inst[if_id], NULL);
 	unregister_netdevice(dev);
 }
@@ -419,7 +420,7 @@ static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_bu
 	skb_cb = T7XX_SKB_CB(skb);
 	netif_id = skb_cb->netif_idx;
-	ccmni = ccmni_ctlb->ccmni_inst[netif_id];
+	ccmni = READ_ONCE(ccmni_ctlb->ccmni_inst[netif_id]);
 	if (!ccmni) {
 		dev_kfree_skb(skb);
 		return;
@@ -441,7 +442,7 @@ static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno
 {
-	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+	struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]);
 	struct netdev_queue *net_queue;
 
 	if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
@@ -453,7 +454,7 @@ static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno
 {
-	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+	struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]);
 	struct netdev_queue *net_queue;
 
 	if (atomic_read(&ccmni->usage) > 0) {
@@ -471,7 +472,7 @@ static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
 	if (ctlb->md_sta != MD_STATE_READY)
 		return;
 
-	if (!ctlb->ccmni_inst[0]) {
+	if (!READ_ONCE(ctlb->ccmni_inst[0])) {
 		dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
 		return;
 	}

@@ -111,6 +111,8 @@
 
 /* bits unique to S1G beacon */
 #define IEEE80211_S1G_BCN_NEXT_TBTT	0x100
+#define IEEE80211_S1G_BCN_CSSID	0x200
+#define IEEE80211_S1G_BCN_ANO	0x400
 
 /* see 802.11ah-2016 9.9 NDP CMAC frames */
 #define IEEE80211_S1G_1MHZ_NDP_BITS	25
@@ -153,9 +155,6 @@
 
 #define IEEE80211_ANO_NETTYPE_WILD	15
 
-/* bits unique to S1G beacon */
-#define IEEE80211_S1G_BCN_NEXT_TBTT	0x100
-
 /* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
 #define IEEE80211_CTL_EXT_POLL		0x2000
 #define IEEE80211_CTL_EXT_SPR		0x3000
@@ -627,6 +626,42 @@ static inline bool ieee80211_is_s1g_beacon(__le16 fc)
 		cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON);
 }
 
+/**
+ * ieee80211_s1g_has_next_tbtt - check if IEEE80211_S1G_BCN_NEXT_TBTT
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ *	next TBTT field
+ */
+static inline bool ieee80211_s1g_has_next_tbtt(__le16 fc)
+{
+	return ieee80211_is_s1g_beacon(fc) &&
+		(fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT));
+}
+
+/**
+ * ieee80211_s1g_has_ano - check if IEEE80211_S1G_BCN_ANO
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ *	ANO field
+ */
+static inline bool ieee80211_s1g_has_ano(__le16 fc)
+{
+	return ieee80211_is_s1g_beacon(fc) &&
+		(fc & cpu_to_le16(IEEE80211_S1G_BCN_ANO));
+}
+
+/**
+ * ieee80211_s1g_has_cssid - check if IEEE80211_S1G_BCN_CSSID
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ *	compressed SSID field
+ */
+static inline bool ieee80211_s1g_has_cssid(__le16 fc)
+{
+	return ieee80211_is_s1g_beacon(fc) &&
+		(fc & cpu_to_le16(IEEE80211_S1G_BCN_CSSID));
+}
+
 /**
  * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon
  * @fc: frame control bytes in little-endian byteorder
@@ -1245,16 +1280,40 @@ struct ieee80211_ext {
 			u8 change_seq;
 			u8 variable[0];
 		} __packed s1g_beacon;
-		struct {
-			u8 sa[ETH_ALEN];
-			__le32 timestamp;
-			u8 change_seq;
-			u8 next_tbtt[3];
-			u8 variable[0];
-		} __packed s1g_short_beacon;
 	} u;
 } __packed __aligned(2);
 
+/**
+ * ieee80211_s1g_optional_len - determine length of optional S1G beacon fields
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: total length in bytes of the optional fixed-length fields
+ *
+ * S1G beacons may contain up to three optional fixed-length fields that
+ * precede the variable-length elements. Whether these fields are present
+ * is indicated by flags in the frame control field.
+ *
+ * From IEEE 802.11-2024 section 9.3.4.3:
+ *  - Next TBTT field may be 0 or 3 bytes
+ *  - Short SSID field may be 0 or 4 bytes
+ *  - Access Network Options (ANO) field may be 0 or 1 byte
+ */
+static inline size_t
+ieee80211_s1g_optional_len(__le16 fc)
+{
+	size_t len = 0;
+
+	if (ieee80211_s1g_has_next_tbtt(fc))
+		len += 3;
+
+	if (ieee80211_s1g_has_cssid(fc))
+		len += 4;
+
+	if (ieee80211_s1g_has_ano(fc))
+		len += 1;
+
+	return len;
+}
+
 #define IEEE80211_TWT_CONTROL_NDP			BIT(0)
 #define IEEE80211_TWT_CONTROL_RESP_MODE			BIT(1)
 #define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST	BIT(3)
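
The new helpers compose into a single expression for locating the variable-length elements of any S1G beacon, long or short. A minimal usage sketch (an illustration, not part of the diff; it simply mirrors the mac80211 and cfg80211 call sites updated further below):

	/* Sketch: skip whatever optional fixed fields the frame control flags
	 * declare to reach the first variable-length element.
	 */
	static const u8 *s1g_beacon_elements(const struct ieee80211_ext *ext)
	{
		if (!ieee80211_is_s1g_beacon(ext->frame_control))
			return NULL;

		return ext->u.s1g_beacon.variable +
		       ieee80211_s1g_optional_len(ext->frame_control);
	}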

@@ -152,7 +152,7 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 			       const __be32 *from, const __be32 *to,
 			       bool pseudohdr);
 void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
-				     __wsum diff, bool pseudohdr);
+				     __wsum diff, bool pseudohdr, bool ipv6);
 
 static __always_inline
 void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,

@@ -2056,6 +2056,7 @@ union bpf_attr {
 *		for updates resulting in a null checksum the value is set to
 *		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
 *		that the modified header field is part of the pseudo-header.
+*		Flag **BPF_F_IPV6** should be set for IPv6 packets.
 *
 *		This helper works in combination with **bpf_csum_diff**\ (),
 *		which does not update the checksum in-place, but offers more
@@ -6072,6 +6073,7 @@ enum {
 	BPF_F_PSEUDO_HDR		= (1ULL << 4),
 	BPF_F_MARK_MANGLED_0		= (1ULL << 5),
 	BPF_F_MARK_ENFORCE		= (1ULL << 6),
+	BPF_F_IPV6			= (1ULL << 7),
 };
 
 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */

@@ -4870,7 +4870,8 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
 
 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
 				     SMP_ALLOW_STK)) {
-		result = L2CAP_CR_LE_AUTHENTICATION;
+		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
+			 L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
 		chan = NULL;
 		goto response_unlock;
 	}

@@ -2566,7 +2566,8 @@ static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
 	struct mgmt_pending_cmd *cmd;
 	int err;
 
-	if (len < sizeof(*cp))
+	if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
+		    le16_to_cpu(cp->params_len)))
 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
 				       MGMT_STATUS_INVALID_PARAMS);

@@ -9968,6 +9968,7 @@ int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
 
 	return dev->netdev_ops->ndo_bpf(dev, bpf);
 }
+EXPORT_SYMBOL_GPL(netif_xdp_propagate);
 
 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
 {
@@ -10498,7 +10499,7 @@ static void dev_index_release(struct net *net, int ifindex)
 static bool from_cleanup_net(void)
 {
 #ifdef CONFIG_NET_NS
-	return current == cleanup_net_task;
+	return current == READ_ONCE(cleanup_net_task);
 #else
 	return false;
 #endif

@@ -170,8 +170,9 @@ static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
 }
 
 static inline struct net_devmem_dmabuf_binding *
-net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
-		       enum dma_data_direction direction,
+net_devmem_bind_dmabuf(struct net_device *dev,
+		       enum dma_data_direction direction,
+		       unsigned int dmabuf_fd,
 		       struct netdev_nl_sock *priv,
 		       struct netlink_ext_ack *extack)
 {

@@ -1968,10 +1968,11 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
 	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
 	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
 	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
+	bool is_ipv6 = flags & BPF_F_IPV6;
 	__sum16 *ptr;
 
 	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
-			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
+			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK | BPF_F_IPV6)))
 		return -EINVAL;
 	if (unlikely(offset > 0xffff || offset & 1))
 		return -EFAULT;
@@ -1987,7 +1988,7 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
 		if (unlikely(from != 0))
 			return -EINVAL;
 
-		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
+		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo, is_ipv6);
 		break;
 	case 2:
 		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
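
For illustration, a minimal tc BPF sketch of how the new flag is meant to be used from a program (a sketch under stated assumptions, not code from this series: fixed offsets with no IPv6 extension headers, and a hypothetical program name and target address). Passing BPF_F_IPV6 tells the helper the packet is IPv6 so it applies the IPv6-specific CHECKSUM_COMPLETE handling shown in the net/core/utils.c change below:

	/* Hypothetical example: rewrite the IPv6 destination address of a TCP
	 * packet and fix up the TCP checksum with the new BPF_F_IPV6 flag.
	 */
	#include <stddef.h>
	#include <linux/bpf.h>
	#include <linux/if_ether.h>
	#include <linux/ipv6.h>
	#include <linux/tcp.h>
	#include <linux/pkt_cls.h>
	#include <bpf/bpf_helpers.h>

	#define IP6_DST_OFF  (ETH_HLEN + offsetof(struct ipv6hdr, daddr))
	#define TCP_CSUM_OFF (ETH_HLEN + sizeof(struct ipv6hdr) + \
			      offsetof(struct tcphdr, check))

	SEC("tc")
	int rewrite_dst(struct __sk_buff *skb)
	{
		struct in6_addr old_dst, new_dst = {};
		__s64 diff;

		new_dst.s6_addr[0] = 0xfc;	/* example target: fc00::2 */
		new_dst.s6_addr[15] = 0x02;

		if (bpf_skb_load_bytes(skb, IP6_DST_OFF, &old_dst, sizeof(old_dst)))
			return TC_ACT_OK;

		diff = bpf_csum_diff((__be32 *)&old_dst, sizeof(old_dst),
				     (__be32 *)&new_dst, sizeof(new_dst), 0);

		bpf_skb_store_bytes(skb, IP6_DST_OFF, &new_dst, sizeof(new_dst), 0);
		/* size field 0 = apply a checksum diff; the address lives in
		 * the pseudo-header, hence BPF_F_PSEUDO_HDR as well.
		 */
		bpf_l4_csum_replace(skb, TCP_CSUM_OFF, 0, diff,
				    BPF_F_PSEUDO_HDR | BPF_F_IPV6);
		return TC_ACT_OK;
	}

	char _license[] SEC("license") = "GPL";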

@@ -654,7 +654,7 @@ static void cleanup_net(struct work_struct *work)
 	struct net *net, *tmp, *last;
 	LIST_HEAD(net_exit_list);
 
-	cleanup_net_task = current;
+	WRITE_ONCE(cleanup_net_task, current);
 
 	/* Atomically snapshot the list of namespaces to cleanup */
 	net_kill_list = llist_del_all(&cleanup_list);
@@ -704,7 +704,7 @@ static void cleanup_net(struct work_struct *work)
 		put_user_ns(net->user_ns);
 		net_passive_dec(net);
 	}
-	cleanup_net_task = NULL;
+	WRITE_ONCE(cleanup_net_task, NULL);
 }
 
 /**

@@ -153,9 +153,9 @@ u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
 EXPORT_SYMBOL(page_pool_ethtool_stats_get);
 
 #else
-#define alloc_stat_inc(pool, __stat)
-#define recycle_stat_inc(pool, __stat)
-#define recycle_stat_add(pool, __stat, val)
+#define alloc_stat_inc(...) do { } while (0)
+#define recycle_stat_inc(...) do { } while (0)
+#define recycle_stat_add(...) do { } while (0)
 #endif
 
 static bool page_pool_producer_lock(struct page_pool *pool)
@@ -741,19 +741,16 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
 
 static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
 {
-	int ret;
+	bool in_softirq, ret;
 
 	/* BH protection not needed if current is softirq */
-	if (in_softirq())
-		ret = ptr_ring_produce(&pool->ring, (__force void *)netmem);
-	else
-		ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem);
-
-	if (!ret) {
+	in_softirq = page_pool_producer_lock(pool);
+	ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem);
+	if (ret)
 		recycle_stat_inc(pool, ring);
-		return true;
-	}
+	page_pool_producer_unlock(pool, in_softirq);
 
-	return false;
+	return ret;
 }
 
 /* Only allow direct recycling in special circumstances, into the
@@ -1150,10 +1147,14 @@ static void page_pool_scrub(struct page_pool *pool)
 
 static int page_pool_release(struct page_pool *pool)
 {
+	bool in_softirq;
 	int inflight;
 
 	page_pool_scrub(pool);
 	inflight = page_pool_inflight(pool, true);
+
+	/* Acquire producer lock to make sure producers have exited. */
+	in_softirq = page_pool_producer_lock(pool);
+	page_pool_producer_unlock(pool, in_softirq);
 
 	if (!inflight)
 		__page_pool_destroy(pool);

@@ -3671,7 +3671,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
 	if (tb[IFLA_LINKMODE])
 		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
 	if (tb[IFLA_GROUP])
-		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+		netif_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
 	if (tb[IFLA_GSO_MAX_SIZE])
 		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
 	if (tb[IFLA_GSO_MAX_SEGS])

@@ -3284,16 +3284,16 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 {
 	struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL;
 	struct proto *prot = sk->sk_prot;
-	bool charged = false;
+	bool charged = true;
 	long allocated;
 
 	sk_memory_allocated_add(sk, amt);
 	allocated = sk_memory_allocated(sk);
 
 	if (memcg) {
-		if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge()))
+		charged = mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge());
+		if (!charged)
 			goto suppress_allocation;
-		charged = true;
 	}
 
 	/* Under limit. */
@@ -3378,7 +3378,7 @@ suppress_allocation:
 
 	sk_memory_allocated_sub(sk, amt);
 
-	if (charged)
+	if (memcg && charged)
 		mem_cgroup_uncharge_skmem(memcg, amt);
 
 	return 0;

@@ -473,11 +473,11 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 EXPORT_SYMBOL(inet_proto_csum_replace16);
 
 void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
-				     __wsum diff, bool pseudohdr)
+				     __wsum diff, bool pseudohdr, bool ipv6)
 {
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		csum_replace_by_diff(sum, diff);
-		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
+		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr && !ipv6)
 			skb->csum = ~csum_sub(diff, skb->csum);
 	} else if (pseudohdr) {
 		*sum = ~csum_fold(csum_add(diff, csum_unfold(*sum)));

@@ -257,7 +257,7 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
 	int source_port;
 	u8 *brcm_tag;
 
-	if (unlikely(!pskb_may_pull(skb, BRCM_LEG_PORT_ID)))
+	if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN)))
 		return NULL;
 
 	brcm_tag = dsa_etype_header_pos_rx(skb);

@@ -495,6 +495,7 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 	bool copy_dtor;
 	__sum16 check;
 	__be16 newlen;
+	int ret = 0;
 
 	mss = skb_shinfo(gso_skb)->gso_size;
 	if (gso_skb->len <= sizeof(*uh) + mss)
@@ -523,6 +524,10 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 		if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size)
 			return __udp_gso_segment_list(gso_skb, features, is_ipv6);
 
+		ret = __skb_linearize(gso_skb);
+		if (ret)
+			return ERR_PTR(ret);
+
 		/* Setup csum, as fraglist skips this in udp4_gro_receive. */
 		gso_skb->csum_start = skb_transport_header(gso_skb) - gso_skb->head;
 		gso_skb->csum_offset = offsetof(struct udphdr, check);

@@ -86,7 +86,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb,
 
 			diff = get_csum_diff(ip6h, p);
 			inet_proto_csum_replace_by_diff(&th->check, skb,
-							diff, true);
+							diff, true, true);
 		}
 		break;
 	case NEXTHDR_UDP:
@@ -97,7 +97,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb,
 		if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 			diff = get_csum_diff(ip6h, p);
 			inet_proto_csum_replace_by_diff(&uh->check, skb,
-							diff, true);
+							diff, true, true);
 			if (!uh->check)
 				uh->check = CSUM_MANGLED_0;
 		}
@@ -111,7 +111,7 @@ static void ila_csum_adjust_transport(struct sk_buff *skb,
 			diff = get_csum_diff(ip6h, p);
 			inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb,
-							diff, true);
+							diff, true, true);
 		}
 		break;
 	}

@@ -1644,10 +1644,8 @@ static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = {
 	[SEG6_LOCAL_SRH]	= { .type = NLA_BINARY },
 	[SEG6_LOCAL_TABLE]	= { .type = NLA_U32 },
 	[SEG6_LOCAL_VRFTABLE]	= { .type = NLA_U32 },
-	[SEG6_LOCAL_NH4]	= { .type = NLA_BINARY,
-				    .len = sizeof(struct in_addr) },
-	[SEG6_LOCAL_NH6]	= { .type = NLA_BINARY,
-				    .len = sizeof(struct in6_addr) },
+	[SEG6_LOCAL_NH4]	= NLA_POLICY_EXACT_LEN(sizeof(struct in_addr)),
+	[SEG6_LOCAL_NH6]	= NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
 	[SEG6_LOCAL_IIF]	= { .type = NLA_U32 },
 	[SEG6_LOCAL_OIF]	= { .type = NLA_U32 },
 	[SEG6_LOCAL_BPF]	= { .type = NLA_NESTED },

@@ -7220,11 +7220,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
 	bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type);
 	if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
 		struct ieee80211_ext *ext = (void *) mgmt;
-
-		if (ieee80211_is_s1g_short_beacon(ext->frame_control))
-			variable = ext->u.s1g_short_beacon.variable;
-		else
-			variable = ext->u.s1g_beacon.variable;
+		variable = ext->u.s1g_beacon.variable +
+			   ieee80211_s1g_optional_len(ext->frame_control);
 	}
 
 	baselen = (u8 *) variable - (u8 *) mgmt;

@@ -276,6 +276,7 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
 	struct ieee80211_bss *bss;
 	struct ieee80211_channel *channel;
+	struct ieee80211_ext *ext;
 	size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
 				      u.probe_resp.variable);
 
@@ -285,12 +286,10 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
 		return;
 
 	if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
-		if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
-			min_hdr_len = offsetof(struct ieee80211_ext,
-					       u.s1g_short_beacon.variable);
-		else
-			min_hdr_len = offsetof(struct ieee80211_ext,
-					       u.s1g_beacon);
+		ext = (struct ieee80211_ext *)mgmt;
+		min_hdr_len =
+			offsetof(struct ieee80211_ext, u.s1g_beacon.variable) +
+			ieee80211_s1g_optional_len(ext->frame_control);
 	}
 
 	if (skb->len < min_hdr_len)

@@ -248,7 +248,7 @@ static noinline bool
 nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
 		      const struct nf_conn *ignored_ct)
 {
-	static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST_BIT;
+	static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST;
 	const struct nf_conntrack_tuple_hash *thash;
 	const struct nf_conntrack_zone *zone;
 	struct nf_conn *ct;
@@ -287,8 +287,14 @@ nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
 	zone = nf_ct_zone(ignored_ct);
 
 	thash = nf_conntrack_find_get(net, zone, tuple);
-	if (unlikely(!thash)) /* clashing entry went away */
-		return false;
+	if (unlikely(!thash)) {
+		struct nf_conntrack_tuple reply;
+
+		nf_ct_invert_tuple(&reply, tuple);
+		thash = nf_conntrack_find_get(net, zone, &reply);
+		if (!thash) /* clashing entry went away */
+			return false;
+	}
 
 	ct = nf_ct_tuplehash_to_ctrack(thash);

@@ -1113,6 +1113,25 @@ bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features,
 	return true;
 }
 
+/**
+ * pipapo_resmap_init_avx2() - Initialise result map before first use
+ * @m:		Matching data, including mapping table
+ * @res_map:	Result map
+ *
+ * Like pipapo_resmap_init() but do not set start map bits covered by the first field.
+ */
+static inline void pipapo_resmap_init_avx2(const struct nft_pipapo_match *m, unsigned long *res_map)
+{
+	const struct nft_pipapo_field *f = m->f;
+	int i;
+
+	/* Starting map doesn't need to be set to all-ones for this
+	 * implementation, but we do need to zero the remaining bits, if any.
+	 */
+	for (i = f->bsize; i < m->bsize_max; i++)
+		res_map[i] = 0ul;
+}
+
 /**
  * nft_pipapo_avx2_lookup() - Lookup function for AVX2 implementation
  * @net:	Network namespace
@@ -1171,7 +1190,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
 	res = scratch->map + (map_index ? m->bsize_max : 0);
 	fill = scratch->map + (map_index ? 0 : m->bsize_max);
 
-	/* Starting map doesn't need to be set for this implementation */
+	pipapo_resmap_init_avx2(m, res);
 
 	nft_pipapo_avx2_prepare();
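
The doc comment above is the heart of the fix: the AVX2 lookup writes only the first field's bsize words of the result map, so stale words beyond that survive into the AND-chain of later fields and can report false matches. A standalone toy illustration of that failure mode (plain C with made-up sizes, not the kernel code):

	/* Toy model: a field lookup only touches the words it covers, so a
	 * stale all-ones tail would read as a match. Zeroing the tail first,
	 * as pipapo_resmap_init_avx2() does, removes the false positive.
	 */
	#include <stdio.h>
	#include <string.h>

	#define BSIZE_MAX 4

	static void first_field_lookup(unsigned long *res, int bsize)
	{
		for (int i = 0; i < bsize; i++)
			res[i] = 0x1UL;		/* genuine matches */
		for (int i = bsize; i < BSIZE_MAX; i++)
			res[i] = 0UL;		/* the fix: zero the tail */
	}

	int main(void)
	{
		unsigned long res[BSIZE_MAX];

		memset(res, 0xff, sizeof(res));	/* stale scratch contents */
		first_field_lookup(res, 2);

		for (int i = 0; i < BSIZE_MAX; i++)
			printf("res[%d] = %lx\n", i, res[i]);
		return 0;
	}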

@@ -1165,8 +1165,10 @@ int netlbl_conn_setattr(struct sock *sk,
 		break;
 #if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
-		if (sk->sk_family != AF_INET6)
-			return -EAFNOSUPPORT;
+		if (sk->sk_family != AF_INET6) {
+			ret_val = -EAFNOSUPPORT;
+			goto conn_setattr_return;
+		}
 
 		addr6 = (struct sockaddr_in6 *)addr;
 		entry = netlbl_domhsh_getentry_af6(secattr->domain,

@@ -45,8 +45,9 @@ static void none_free_call_crypto(struct rxrpc_call *call)
 static bool none_validate_challenge(struct rxrpc_connection *conn,
 				    struct sk_buff *skb)
 {
-	return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
-				rxrpc_eproto_rxnull_challenge);
+	rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
+			 rxrpc_eproto_rxnull_challenge);
+	return true;
 }
 
 static int none_sendmsg_respond_to_challenge(struct sk_buff *challenge,

@@ -818,7 +818,11 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
 	}
 
 	/* Get net to avoid freed tipc_crypto when delete namespace */
-	get_net(aead->crypto->net);
+	if (!maybe_get_net(aead->crypto->net)) {
+		tipc_bearer_put(b);
+		rc = -ENODEV;
+		goto exit;
+	}
 
 	/* Now, do encrypt */
 	rc = crypto_aead_encrypt(req);

@@ -3250,6 +3250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
 	const u8 *ie;
 	size_t ielen;
 	u64 tsf;
+	size_t s1g_optional_len;
 
 	if (WARN_ON(!mgmt))
 		return NULL;
@@ -3264,12 +3265,11 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
 	if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
 		ext = (void *) mgmt;
-		if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
-			min_hdr_len = offsetof(struct ieee80211_ext,
-					       u.s1g_short_beacon.variable);
-		else
-			min_hdr_len = offsetof(struct ieee80211_ext,
-					       u.s1g_beacon.variable);
+		s1g_optional_len =
+			ieee80211_s1g_optional_len(ext->frame_control);
+		min_hdr_len =
+			offsetof(struct ieee80211_ext, u.s1g_beacon.variable) +
+			s1g_optional_len;
 	} else {
 		/* same for beacons */
 		min_hdr_len = offsetof(struct ieee80211_mgmt,
@@ -3285,11 +3285,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
 		const struct ieee80211_s1g_bcn_compat_ie *compat;
 		const struct element *elem;
 
-		if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
-			ie = ext->u.s1g_short_beacon.variable;
-		else
-			ie = ext->u.s1g_beacon.variable;
+		ie = ext->u.s1g_beacon.variable + s1g_optional_len;
 
 		elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT, ie, ielen);
 		if (!elem)
 			return NULL;

@@ -2056,6 +2056,7 @@ union bpf_attr {
 *		for updates resulting in a null checksum the value is set to
 *		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
 *		that the modified header field is part of the pseudo-header.
+*		Flag **BPF_F_IPV6** should be set for IPv6 packets.
 *
 *		This helper works in combination with **bpf_csum_diff**\ (),
 *		which does not update the checksum in-place, but offers more
@@ -6072,6 +6073,7 @@ enum {
 	BPF_F_PSEUDO_HDR		= (1ULL << 4),
 	BPF_F_MARK_MANGLED_0		= (1ULL << 5),
 	BPF_F_MARK_ENFORCE		= (1ULL << 6),
+	BPF_F_IPV6			= (1ULL << 7),
 };
 
 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */

@@ -10,7 +10,6 @@ CONFIG_KUNIT_EXAMPLE_TEST=y
 CONFIG_KUNIT_ALL_TESTS=y
 
 CONFIG_FORTIFY_SOURCE=y
-CONFIG_INIT_STACK_ALL_PATTERN=y
 
 CONFIG_IIO=y

@@ -205,7 +205,7 @@ export KHDR_INCLUDES
 
 all:
 	@ret=1;	\
-	for TARGET in $(TARGETS); do		\
+	for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do	\
 		BUILD_TARGET=$$BUILD/$$TARGET;	\
 		mkdir $$BUILD_TARGET  -p;	\
 		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET \

@@ -0,0 +1,5 @@
+CONFIG_IPV6=y
+CONFIG_IPV6_GRE=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_VXLAN=y

@@ -39,7 +39,7 @@ def run_one_stream(cfg, ipver, remote_v4, remote_v6, should_lso):
     port = rand_port()
     listen_cmd = f"socat -{ipver} -t 2 -u TCP-LISTEN:{port},reuseport /dev/null,ignoreeof"
 
-    with bkg(listen_cmd, host=cfg.remote) as nc:
+    with bkg(listen_cmd, host=cfg.remote, exit_wait=True) as nc:
         wait_port_listen(port, host=cfg.remote)
 
         if ipver == "4":
@@ -216,7 +216,7 @@ def main() -> None:
         ("", "6", "tx-tcp6-segmentation", None),
         ("vxlan", "", "tx-udp_tnl-segmentation", ("vxlan", True, "id 100 dstport 4789 noudpcsum")),
         ("vxlan_csum", "", "tx-udp_tnl-csum-segmentation", ("vxlan", False, "id 100 dstport 4789 udpcsum")),
-        ("gre", "4", "tx-gre-segmentation", ("ipgre", False, "")),
+        ("gre", "4", "tx-gre-segmentation", ("gre", False, "")),
         ("gre", "6", "tx-gre-segmentation", ("ip6gre", False, "")),
     )

@@ -419,6 +419,7 @@ table inet filter {
 	set test {
 		type ${type_spec}
+		counter
 		flags interval,timeout
 	}
@@ -1158,8 +1159,17 @@ del() {
 	fi
 }
 
-# Return packet count from 'test' counter in 'inet filter' table
+# Return packet count for elem $1 from 'test' counter in 'inet filter' table
 count_packets() {
+	found=0
+	for token in $(nft reset element inet filter test "${1}" ); do
+		[ ${found} -eq 1 ] && echo "${token}" && return
+		[ "${token}" = "packets" ] && found=1
+	done
+}
+
+# Return packet count from 'test' counter in 'inet filter' table
+count_packets_nomatch() {
 	found=0
 	for token in $(nft list counter inet filter test); do
 		[ ${found} -eq 1 ] && echo "${token}" && return
@@ -1206,6 +1216,10 @@ perf() {
 
 # Set MAC addresses, send single packet, check that it matches, reset counter
 send_match() {
+	local elem="$1"
+
+	shift
+
 	ip link set veth_a address "$(format_mac "${1}")"
 	ip -n B link set veth_b address "$(format_mac "${2}")"
@@ -1216,7 +1230,7 @@ send_match() {
 		eval src_"$f"=\$\(format_\$f "${2}"\)
 	done
 	eval send_\$proto
-	if [ "$(count_packets)" != "1" ]; then
+	if [ "$(count_packets "$elem")" != "1" ]; then
 		err "${proto} packet to:"
 		err "  $(for f in ${dst}; do
 			eval format_\$f "${1}"; printf ' '; done)"
@@ -1242,7 +1256,7 @@ send_nomatch() {
 		eval src_"$f"=\$\(format_\$f "${2}"\)
 	done
 	eval send_\$proto
-	if [ "$(count_packets)" != "0" ]; then
+	if [ "$(count_packets_nomatch)" != "0" ]; then
 		err "${proto} packet to:"
 		err "  $(for f in ${dst}; do
 			eval format_\$f "${1}"; printf ' '; done)"
@@ -1255,6 +1269,42 @@ send_nomatch() {
 	fi
 }
 
+maybe_send_nomatch() {
+	local elem="$1"
+	local what="$4"
+
+	[ $((RANDOM%20)) -gt 0 ] && return
+
+	dst_addr4="$2"
+	dst_port="$3"
+	send_udp
+
+	if [ "$(count_packets_nomatch)" != "0" ]; then
+		err "Packet to $dst_addr4:$dst_port did match $what"
+		err "$(nft -a list ruleset)"
+		return 1
+	fi
+}
+
+maybe_send_match() {
+	local elem="$1"
+	local what="$4"
+
+	[ $((RANDOM%20)) -gt 0 ] && return
+
+	dst_addr4="$2"
+	dst_port="$3"
+	send_udp
+
+	if [ "$(count_packets "{ $elem }")" != "1" ]; then
+		err "Packet to $dst_addr4:$dst_port did not match $what"
+		err "$(nft -a list ruleset)"
+		return 1
+	fi
+
+	nft reset counter inet filter test >/dev/null
+	nft reset element inet filter test "{ $elem }" >/dev/null
+}
+
 # Correctness test template:
 # - add ranged element, check that packets match it
 # - check that packets outside range don't match it
@@ -1262,6 +1312,8 @@ send_nomatch() {
 test_correctness_main() {
 	range_size=1
 	for i in $(seq "${start}" $((start + count))); do
+		local elem=""
+
 		end=$((start + range_size))
 
 		# Avoid negative or zero-sized port ranges
@@ -1272,15 +1324,16 @@ test_correctness_main() {
 		srcstart=$((start + src_delta))
 		srcend=$((end + src_delta))
 
-		add "$(format)" || return 1
+		elem="$(format)"
+		add "$elem" || return 1
 		for j in $(seq "$start" $((range_size / 2 + 1)) ${end}); do
-			send_match "${j}" $((j + src_delta)) || return 1
+			send_match "$elem" "${j}" $((j + src_delta)) || return 1
 		done
 		send_nomatch $((end + 1)) $((end + 1 + src_delta)) || return 1
 
 		# Delete elements now and then
 		if [ $((i % 3)) -eq 0 ]; then
-			del "$(format)" || return 1
+			del "$elem" || return 1
 			for j in $(seq "$start" \
 					$((range_size / 2 + 1)) ${end}); do
 				send_nomatch "${j}" $((j + src_delta)) \
@@ -1572,14 +1625,17 @@ test_timeout() {
 	range_size=1
 	for i in $(seq "$start" $((start + count))); do
+		local elem=""
+
 		end=$((start + range_size))
 		srcstart=$((start + src_delta))
 		srcend=$((end + src_delta))
 
-		add "$(format)" || return 1
+		elem="$(format)"
+		add "$elem" || return 1
 		for j in $(seq "$start" $((range_size / 2 + 1)) ${end}); do
-			send_match "${j}" $((j + src_delta)) || return 1
+			send_match "$elem" "${j}" $((j + src_delta)) || return 1
 		done
 
 		range_size=$((range_size + 1))
@@ -1737,7 +1793,7 @@ test_bug_reload() {
 		srcend=$((end + src_delta))
 
 		for j in $(seq "$start" $((range_size / 2 + 1)) ${end}); do
-			send_match "${j}" $((j + src_delta)) || return 1
+			send_match "$(format)" "${j}" $((j + src_delta)) || return 1
 		done
 
 		range_size=$((range_size + 1))
@@ -1756,22 +1812,34 @@ test_bug_net_port_proto_match() {
 	range_size=1
 	for i in $(seq 1 10); do
 		for j in $(seq 1 20) ; do
-			elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
+			local dport=$j
+
+			elem=$(printf "10.%d.%d.0/24 . %d-%d0 . 6-17 " ${i} ${j} ${dport} "$((dport+1))")
+
+			# too slow, do not test all addresses
+			maybe_send_nomatch "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d1" $((dport+1))) "before add" || return 1
 
 			nft "add element inet filter test { $elem }" || return 1
+
+			maybe_send_match "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d" $dport) "after add" || return 1
 
 			nft "get element inet filter test { $elem }" | grep -q "$elem"
 			if [ $? -ne 0 ];then
 				local got=$(nft "get element inet filter test { $elem }")
 				err "post-add: should have returned $elem but got $got"
 				return 1
 			fi
+
+			maybe_send_nomatch "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d1" $((dport+1))) "out-of-range" || return 1
 		done
 	done
 
 	# recheck after set was filled
 	for i in $(seq 1 10); do
 		for j in $(seq 1 20) ; do
-			elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
+			local dport=$j
+
+			elem=$(printf "10.%d.%d.0/24 . %d-%d0 . 6-17 " ${i} ${j} ${dport} "$((dport+1))")
 
 			nft "get element inet filter test { $elem }" | grep -q "$elem"
 			if [ $? -ne 0 ];then
@@ -1779,6 +1847,9 @@ test_bug_net_port_proto_match() {
 				err "post-fill: should have returned $elem but got $got"
 				return 1
 			fi
+
+			maybe_send_match "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d" $dport) "recheck" || return 1
+			maybe_send_nomatch "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d1" $((dport+1))) "recheck out-of-range" || return 1
 		done
 	done
 
@@ -1786,9 +1857,10 @@ test_bug_net_port_proto_match() {
 	for i in $(seq 1 10); do
 		for j in $(seq 1 20) ; do
 			local rnd=$((RANDOM%10))
+			local dport=$j
 			local got=""
 
-			elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
+			elem=$(printf "10.%d.%d.0/24 . %d-%d0 . 6-17 " ${i} ${j} ${dport} "$((dport+1))")
 			if [ $rnd -gt 0 ];then
 				continue
 			fi
@@ -1799,6 +1871,8 @@ test_bug_net_port_proto_match() {
 				err "post-delete: query for $elem returned $got instead of error."
 				return 1
 			fi
+
+			maybe_send_nomatch "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d" $dport) "match after deletion" || return 1
 		done
 	done
 
@@ -1817,7 +1891,7 @@ test_bug_avx2_mismatch()
 	dst_addr6="$a2"
 	send_icmp6
 
-	if [ "$(count_packets)" -gt "0" ]; then
+	if [ "$(count_packets "{ icmpv6 . $a1 }")" -gt "0" ]; then
		err "False match for $a2"
 		return 1
 	fi

@@ -866,6 +866,24 @@ EOF
 	ip netns exec "$ns0" nft delete table $family nat
 }
 
+file_cmp()
+{
+	local infile="$1"
+	local outfile="$2"
+
+	if ! cmp "$infile" "$outfile";then
+		echo -n "Infile "
+		ls -l "$infile"
+		echo -n "Outfile "
+		ls -l "$outfile"
+		echo "ERROR: in and output file mismatch when checking $3" 1>&2
+		ret=1
+		return 1
+	fi
+
+	return 0
+}
+
 test_stateless_nat_ip()
 {
 	local lret=0
@@ -966,11 +984,7 @@ EOF
 
 	wait
 
-	if ! cmp "$INFILE" "$OUTFILE";then
-		ls -l "$INFILE" "$OUTFILE"
-		echo "ERROR: in and output file mismatch when checking udp with stateless nat" 1>&2
-		lret=1
-	fi
+	file_cmp "$INFILE" "$OUTFILE" "udp with stateless nat" || lret=1
 
 	:> "$OUTFILE"
@@ -991,6 +1005,62 @@ EOF
 	return $lret
 }
 
+test_dnat_clash()
+{
+	local lret=0
+
+	if ! socat -h > /dev/null 2>&1;then
+		echo "SKIP: Could not run dnat clash test without socat tool"
+		[ $ret -eq 0 ] && ret=$ksft_skip
+		return $ksft_skip
+	fi
+
+	ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+flush ruleset
+table ip dnat-test {
+	chain prerouting {
+		type nat hook prerouting priority dstnat; policy accept;
+		ip daddr 10.0.2.1 udp dport 1234 counter dnat to 10.0.1.1:1234
+	}
+}
+EOF
+	if [ $? -ne 0 ]; then
+		echo "SKIP: Could not add dnat rules"
+		[ $ret -eq 0 ] && ret=$ksft_skip
+		return $ksft_skip
+	fi
+
+	local udpdaddr="10.0.2.1"
+	for i in 1 2;do
+		echo "PING $udpdaddr" > "$INFILE"
+		echo "PONG 10.0.1.1 step $i" | ip netns exec "$ns0" timeout 3 socat STDIO UDP4-LISTEN:1234,bind=10.0.1.1 > "$OUTFILE" 2>/dev/null &
+		local lpid=$!
+
+		busywait $BUSYWAIT_TIMEOUT listener_ready "$ns0" 1234 "-u"
+
+		result=$(ip netns exec "$ns1" timeout 3 socat STDIO UDP4-SENDTO:"$udpdaddr:1234,sourceport=4321" < "$INFILE")
+		udpdaddr="10.0.1.1"
+
+		if [ "$result" != "PONG 10.0.1.1 step $i" ] ; then
+			echo "ERROR: failed to test udp $ns1 to $ns2 with dnat rule step $i, result: \"$result\"" 1>&2
+			lret=1
+			ret=1
+		fi
+
+		wait
+
+		file_cmp "$INFILE" "$OUTFILE" "udp dnat step $i" || lret=1
+
+		:> "$OUTFILE"
+	done
+
+	test $lret -eq 0 && echo "PASS: IP dnat clash $ns1:$ns2"
+
+	ip netns exec "$ns0" nft flush ruleset
+
+	return $lret
+}
+
 # ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99
 for i in "$ns0" "$ns1" "$ns2" ;do
 	ip netns exec "$i" nft -f /dev/stdin <<EOF
@@ -1147,6 +1217,7 @@ $test_inet_nat && test_redirect6 inet
 
 test_port_shadowing
 test_stateless_nat_ip
+test_dnat_clash
 
 if [ $ret -ne 0 ];then
 	echo -n "FAIL: "

@@ -2166,6 +2166,7 @@ static int ovpn_parse_cmd_args(struct ovpn_ctx *ovpn, int argc, char *argv[])
 
 		ovpn->peers_file = argv[4];
 
+		ovpn->sa_family = AF_INET;
 		if (argc > 5 && !strcmp(argv[5], "ipv6"))
 			ovpn->sa_family = AF_INET6;
 		break;

@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+MTU="1500"
+
+source test.sh