-rw-r--r--  .mailmap  2
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt  35
-rw-r--r--  Documentation/admin-guide/sysctl/net.rst  8
-rw-r--r--  Documentation/devicetree/bindings/sound/everest,es8316.yaml  4
-rw-r--r--  Documentation/devicetree/bindings/sound/realtek,rt5640.yaml  11
-rw-r--r--  Documentation/devicetree/bindings/sound/rockchip-spdif.yaml  3
-rw-r--r--  Documentation/devicetree/bindings/ufs/ufs-common.yaml  4
-rw-r--r--  Documentation/userspace-api/media/v4l/metafmt-arm-mali-c55.rst  2
-rw-r--r--  MAINTAINERS  7
-rw-r--r--  arch/mips/mm/init.c  23
-rw-r--r--  arch/powerpc/kernel/watchdog.c  15
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c  6
-rw-r--r--  arch/x86/kernel/fpu/core.c  32
-rw-r--r--  arch/x86/kernel/kvm.c  19
-rw-r--r--  arch/x86/kvm/x86.c  9
-rw-r--r--  drivers/acpi/x86/s2idle.c  9
-rw-r--r--  drivers/dax/dax-private.h  10
-rw-r--r--  drivers/firmware/efi/cper.c  2
-rw-r--r--  drivers/firmware/efi/efi.c  1
-rw-r--r--  drivers/gpio/gpio-davinci.c  18
-rw-r--r--  drivers/gpio/gpiolib.c  3
-rw-r--r--  drivers/hv/mshv_common.c  2
-rw-r--r--  drivers/hv/mshv_regions.c  20
-rw-r--r--  drivers/iommu/iommu-sva.c  1
-rw-r--r--  drivers/media/i2c/ov02c10.c  28
-rw-r--r--  drivers/media/pci/intel/Kconfig  2
-rw-r--r--  drivers/media/pci/intel/ipu-bridge.c  29
-rw-r--r--  drivers/media/platform/arm/mali-c55/mali-c55-params.c  7
-rw-r--r--  drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c  41
-rw-r--r--  drivers/net/can/Kconfig  7
-rw-r--r--  drivers/net/can/Makefile  2
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_base.c  2
-rw-r--r--  drivers/net/can/dev/Makefile  5
-rw-r--r--  drivers/net/can/dev/dev.c  27
-rw-r--r--  drivers/net/can/dev/netlink.c  1
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.c  2
-rw-r--r--  drivers/net/can/usb/gs_usb.c  2
-rw-r--r--  drivers/net/can/vcan.c  15
-rw-r--r--  drivers/net/can/vxcan.c  15
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c  15
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c  3
-rw-r--r--  drivers/net/macvlan.c  20
-rw-r--r--  drivers/net/phy/motorcomm.c  4
-rw-r--r--  drivers/net/virtio_net.c  175
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.c  2
-rw-r--r--  drivers/scsi/scsi_error.c  24
-rw-r--r--  drivers/scsi/scsi_lib.c  2
-rw-r--r--  drivers/soundwire/slave.c  1
-rw-r--r--  drivers/ufs/core/ufshcd.c  7
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c  2
-rw-r--r--  fs/gfs2/lops.c  2
-rw-r--r--  fs/nfs/blocklayout/dev.c  6
-rw-r--r--  fs/nfs/delegation.c  7
-rw-r--r--  fs/nfs/dir.c  78
-rw-r--r--  fs/nfs/file.c  3
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayoutdev.c  2
-rw-r--r--  fs/nfs/inode.c  10
-rw-r--r--  fs/nfs/io.c  2
-rw-r--r--  fs/nfs/localio.c  32
-rw-r--r--  fs/nfs/nfs42proc.c  29
-rw-r--r--  fs/nfs/nfs4proc.c  53
-rw-r--r--  fs/nfs/nfs4state.c  6
-rw-r--r--  fs/nfs/nfstrace.h  3
-rw-r--r--  fs/nfs/pnfs.c  58
-rw-r--r--  fs/nfs/pnfs.h  17
-rw-r--r--  fs/nfs/write.c  33
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c  11
-rw-r--r--  fs/xfs/libxfs/xfs_rtgroup.c  53
-rw-r--r--  fs/xfs/libxfs/xfs_rtgroup.h  2
-rw-r--r--  fs/xfs/xfs_log.c  8
-rw-r--r--  fs/xfs/xfs_rtalloc.c  6
-rw-r--r--  include/hyperv/hvgdk_mini.h  7
-rw-r--r--  include/linux/can/can-ml.h  24
-rw-r--r--  include/linux/can/dev.h  8
-rw-r--r--  include/linux/cgroup-defs.h  25
-rw-r--r--  include/linux/kfence.h  1
-rw-r--r--  include/linux/nfs_fs.h  1
-rw-r--r--  include/linux/nmi.h  1
-rw-r--r--  include/linux/sched/mm.h  1
-rw-r--r--  include/linux/soc/airoha/airoha_offload.h  4
-rw-r--r--  include/linux/textsearch.h  1
-rw-r--r--  include/net/dropreason-core.h  6
-rw-r--r--  include/net/hotdata.h  1
-rw-r--r--  include/net/ip_tunnels.h  13
-rw-r--r--  include/scsi/scsi_eh.h  6
-rw-r--r--  include/sound/pcm.h  2
-rw-r--r--  include/uapi/linux/media/arm/mali-c55-config.h  9
-rw-r--r--  kernel/bpf/verifier.c  5
-rw-r--r--  kernel/cgroup/cgroup.c  2
-rw-r--r--  kernel/liveupdate/kexec_handover.c  37
-rw-r--r--  kernel/module/kmod.c  1
-rw-r--r--  kernel/printk/nbcon.c  38
-rw-r--r--  kernel/trace/ftrace.c  29
-rw-r--r--  kernel/watchdog.c  2
-rw-r--r--  lib/buildid.c  32
-rw-r--r--  mm/damon/core.c  41
-rw-r--r--  mm/damon/sysfs-schemes.c  10
-rw-r--r--  mm/damon/sysfs.c  9
-rw-r--r--  mm/hugetlb.c  16
-rw-r--r--  mm/kmsan/shadow.c  2
-rw-r--r--  mm/numa_memblks.c  2
-rw-r--r--  mm/page_alloc.c  57
-rw-r--r--  mm/vma.c  111
-rw-r--r--  mm/vma.h  3
-rw-r--r--  mm/vmalloc.c  2
-rw-r--r--  mm/zswap.c  2
-rw-r--r--  net/bluetooth/hci_sync.c  1
-rw-r--r--  net/bpf/test_run.c  25
-rw-r--r--  net/bridge/br_fdb.c  28
-rw-r--r--  net/bridge/br_input.c  4
-rw-r--r--  net/can/j1939/transport.c  10
-rw-r--r--  net/can/raw.c  51
-rw-r--r--  net/core/dev.c  31
-rw-r--r--  net/core/dst.c  1
-rw-r--r--  net/core/hotdata.c  1
-rw-r--r--  net/core/sysctl_net_core.c  7
-rw-r--r--  net/ipv4/esp4_offload.c  4
-rw-r--r--  net/ipv4/ip_gre.c  11
-rw-r--r--  net/ipv4/ip_tunnel.c  5
-rw-r--r--  net/ipv4/route.c  4
-rw-r--r--  net/ipv6/addrconf.c  4
-rw-r--r--  net/ipv6/esp6_offload.c  4
-rw-r--r--  net/ipv6/ip6_tunnel.c  2
-rw-r--r--  net/ipv6/route.c  4
-rw-r--r--  net/sched/sch_qfq.c  6
-rw-r--r--  net/xfrm/xfrm_state.c  1
-rw-r--r--  rust/helpers/bitops.c  42
-rw-r--r--  sound/core/oss/pcm_oss.c  4
-rw-r--r--  sound/core/pcm_native.c  9
-rw-r--r--  sound/hda/codecs/realtek/alc269.c  2
-rw-r--r--  sound/hda/codecs/side-codecs/cirrus_scodec_test.c  3
-rw-r--r--  sound/hda/codecs/side-codecs/tas2781_hda_i2c.c  18
-rw-r--r--  sound/soc/amd/yc/acp6x-mach.c  7
-rw-r--r--  sound/soc/codecs/tlv320adcx140.c  13
-rw-r--r--  sound/soc/codecs/wsa881x.c  9
-rw-r--r--  sound/soc/codecs/wsa883x.c  26
-rw-r--r--  sound/soc/codecs/wsa884x.c  3
-rw-r--r--  sound/soc/generic/simple-card-utils.c  4
-rw-r--r--  sound/soc/intel/boards/sof_sdw.c  8
-rw-r--r--  sound/soc/sdw_utils/soc_sdw_cs42l43.c  2
-rw-r--r--  sound/soc/sdw_utils/soc_sdw_utils.c  43
-rw-r--r--  sound/soc/soc-ops.c  4
-rw-r--r--  sound/soc/tegra/tegra210_ahub.c  6
-rw-r--r--  sound/soc/ti/davinci-evm.c  39
-rw-r--r--  sound/usb/pcm.c  2
-rw-r--r--  tools/net/ynl/pyynl/lib/doc_generator.py  9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c  14
-rw-r--r--  tools/testing/selftests/drivers/net/hw/toeplitz.c  4
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/toeplitz.py  6
-rw-r--r--  tools/testing/selftests/kvm/x86/amx_test.c  144
-rw-r--r--  tools/testing/selftests/mm/gup_longterm.c  2
-rw-r--r--  tools/testing/selftests/mm/merge.c  384
-rw-r--r--  tools/testing/vsock/util.c  12
156 files changed, 1938 insertions, 804 deletions
diff --git a/.mailmap b/.mailmap
index fa018b5bd533..4a8a160f28ed 100644
--- a/.mailmap
+++ b/.mailmap
@@ -207,6 +207,7 @@ Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
+Daniel Thompson <danielt@kernel.org> <daniel.thompson@linaro.org>
Danilo Krummrich <dakr@kernel.org> <dakr@redhat.com>
David Brownell <david-b@pacbell.net>
David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
@@ -794,6 +795,7 @@ Sven Eckelmann <sven@narfation.org> <sven.eckelmann@open-mesh.com>
Sven Eckelmann <sven@narfation.org> <sven.eckelmann@openmesh.com>
Sven Eckelmann <sven@narfation.org> <sven@open-mesh.com>
Sven Peter <sven@kernel.org> <sven@svenpeter.dev>
+Szymon Wilczek <swilczek.lx@gmail.com> <szymonwilczek@gmx.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Tamizh Chelvam Raja <quic_tamizhr@quicinc.com> <tamizhr@codeaurora.org>
Taniya Das <quic_tdas@quicinc.com> <tdas@codeaurora.org>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a8d0afde7f85..1058f2a6d6a8 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2917,6 +2917,41 @@ Kernel parameters
for Movable pages. "nn[KMGTPE]", "nn%", and "mirror"
are exclusive, so you cannot specify multiple forms.
+ kfence.burst= [MM,KFENCE] The number of additional successive
+ allocations to be attempted through KFENCE for each
+ sample interval.
+ Format: <unsigned integer>
+ Default: 0
+
+ kfence.check_on_panic=
+ [MM,KFENCE] Whether to check all KFENCE-managed objects'
+ canaries on panic.
+ Format: <bool>
+ Default: false
+
+ kfence.deferrable=
+ [MM,KFENCE] Whether to use a deferrable timer to trigger
+ allocations. This avoids forcing CPU wake-ups if the
+ system is idle, at the risk of a less predictable
+ sample interval.
+ Format: <bool>
+ Default: CONFIG_KFENCE_DEFERRABLE
+
+ kfence.sample_interval=
+ [MM,KFENCE] KFENCE's sample interval in milliseconds.
+ Format: <unsigned integer>
+ 0 - Disable KFENCE.
+ >0 - Enable KFENCE with the given sample interval.
+ Default: CONFIG_KFENCE_SAMPLE_INTERVAL
+
+ kfence.skip_covered_thresh=
+ [MM,KFENCE] If pool utilization reaches this threshold
+ (pool usage%), KFENCE skips allocations from
+ already-covered sources to keep them from further
+ filling up the pool.
+ Format: <unsigned integer>
+ Default: 75
+
kgdbdbgp= [KGDB,HW,EARLY] kgdb over EHCI usb debug port.
Format: <Controller#>[,poll interval]
The controller # is the number of the ehci usb debug
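
The kfence.* parameters documented in the hunk above are module parameters of a built-in; roughly, each "kfence.<name>=" boot knob corresponds to a module_param_named() definition (the "kfence." prefix comes from the defining file's KBUILD_MODNAME). A minimal sketch under that assumption; the real definitions in mm/kfence/core.c use custom param ops for some knobs, e.g. to re-arm the timer when sample_interval changes:

#include <linux/moduleparam.h>

static unsigned long kfence_sample_interval = CONFIG_KFENCE_SAMPLE_INTERVAL;
module_param_named(sample_interval, kfence_sample_interval, ulong, 0600);

static bool kfence_check_on_panic;
module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
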
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 369a738a6819..91fa4ccd326c 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -303,6 +303,14 @@ netdev_max_backlog
Maximum number of packets, queued on the INPUT side, when the interface
receives packets faster than kernel can process them.
+qdisc_max_burst
+------------------
+
+Maximum number of packets that can be temporarily stored before
+reaching the qdisc.
+
+Default: 1000
+
netdev_rss_key
--------------
diff --git a/Documentation/devicetree/bindings/sound/everest,es8316.yaml b/Documentation/devicetree/bindings/sound/everest,es8316.yaml
index 81a0215050e0..fe5d938ca310 100644
--- a/Documentation/devicetree/bindings/sound/everest,es8316.yaml
+++ b/Documentation/devicetree/bindings/sound/everest,es8316.yaml
@@ -49,6 +49,10 @@ properties:
items:
- const: mclk
+ interrupts:
+ maxItems: 1
+ description: Headphone detect interrupt
+
port:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/sound/realtek,rt5640.yaml b/Documentation/devicetree/bindings/sound/realtek,rt5640.yaml
index 3f4f59287c1c..2eb631950963 100644
--- a/Documentation/devicetree/bindings/sound/realtek,rt5640.yaml
+++ b/Documentation/devicetree/bindings/sound/realtek,rt5640.yaml
@@ -47,6 +47,12 @@ properties:
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: mclk
+
interrupts:
maxItems: 1
description: The CODEC's interrupt output.
@@ -98,6 +104,7 @@ properties:
- 4 # Use GPIO2 for jack-detect
- 5 # Use GPIO3 for jack-detect
- 6 # Use GPIO4 for jack-detect
+ - 7 # Use HDA header for jack-detect
realtek,jack-detect-not-inverted:
description:
@@ -121,6 +128,10 @@ properties:
- 2 # Scale current by 1.0
- 3 # Scale current by 1.5
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml b/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
index 32dea7392e8d..56c755c22945 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
+++ b/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
@@ -70,6 +70,9 @@ properties:
"#sound-dai-cells":
const: 0
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/ufs/ufs-common.yaml b/Documentation/devicetree/bindings/ufs/ufs-common.yaml
index 9f04f34d8c5a..ed97f5682509 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-common.yaml
+++ b/Documentation/devicetree/bindings/ufs/ufs-common.yaml
@@ -48,8 +48,8 @@ properties:
enum: [1, 2]
default: 2
description:
- Number of lanes available per direction. Note that it is assume same
- number of lanes is used both directions at once.
+ Number of lanes available per direction. Note that it is assumed that
+ the same number of lanes are used in both directions at once.
vdd-hba-supply:
description:
diff --git a/Documentation/userspace-api/media/v4l/metafmt-arm-mali-c55.rst b/Documentation/userspace-api/media/v4l/metafmt-arm-mali-c55.rst
index 696e0a645a7e..f8029bcb5282 100644
--- a/Documentation/userspace-api/media/v4l/metafmt-arm-mali-c55.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-arm-mali-c55.rst
@@ -44,7 +44,7 @@ member and userspace must populate the type member with a value from
struct v4l2_isp_params_buffer *params =
(struct v4l2_isp_params_buffer *)buffer;
- params->version = MALI_C55_PARAM_BUFFER_V1;
+ params->version = V4L2_ISP_PARAMS_VERSION_V1;
params->data_size = 0;
void *data = (void *)params->data;
diff --git a/MAINTAINERS b/MAINTAINERS
index d0ea96323ba0..41ace5aeecbe 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -314,6 +314,7 @@ R: Mauro Carvalho Chehab <mchehab@kernel.org>
R: Shuai Xue <xueshuai@linux.alibaba.com>
L: linux-acpi@vger.kernel.org
F: drivers/acpi/apei/
+F: drivers/firmware/efi/cper*
ACPI COMPONENT ARCHITECTURE (ACPICA)
M: "Rafael J. Wysocki" <rafael@kernel.org>
@@ -9516,6 +9517,7 @@ F: arch/arm/boot/compressed/efi-header.S
F: arch/x86/platform/efi/
F: drivers/firmware/efi/
F: include/linux/efi*.h
+X: drivers/firmware/efi/cper*
EXTERNAL CONNECTOR SUBSYSTEM (EXTCON)
M: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -14880,6 +14882,7 @@ LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Sathya Prakash <sathya.prakash@broadcom.com>
M: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
M: Suganath Prabu Subramani <suganath-prabu.subramani@broadcom.com>
+M: Ranjan Kumar <ranjan.kumar@broadcom.com>
L: MPT-FusionLinux.pdl@broadcom.com
L: linux-scsi@vger.kernel.org
S: Supported
@@ -18423,9 +18426,11 @@ M: Jakub Kicinski <kuba@kernel.org>
M: Sabrina Dubroca <sd@queasysnail.net>
L: netdev@vger.kernel.org
S: Maintained
+F: Documentation/networking/tls*
F: include/net/tls.h
F: include/uapi/linux/tls.h
-F: net/tls/*
+F: net/tls/
+F: tools/testing/selftests/net/tls.c
NETWORKING [SOCKETS]
M: Eric Dumazet <edumazet@google.com>
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index a673d3d68254..8986048f9b11 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -425,6 +425,28 @@ void __init paging_init(void)
static struct kcore_list kcore_kseg0;
#endif
+static inline void __init highmem_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long tmp;
+
+ /*
+ * If the CPU cannot support HIGHMEM, discard the memory above highstart_pfn
+ */
+ if (cpu_has_dc_aliases) {
+ memblock_remove(PFN_PHYS(highstart_pfn), -1);
+ return;
+ }
+
+ for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
+ struct page *page = pfn_to_page(tmp);
+
+ if (!memblock_is_memory(PFN_PHYS(tmp)))
+ SetPageReserved(page);
+ }
+#endif
+}
+
void __init arch_mm_preinit(void)
{
/*
@@ -435,6 +457,7 @@ void __init arch_mm_preinit(void)
maar_init();
setup_zero_pages(); /* Setup zeroed pages. */
+ highmem_init();
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 2429cb1c7baa..764001deb060 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/processor.h>
#include <linux/smp.h>
+#include <linux/sys_info.h>
#include <asm/interrupt.h>
#include <asm/paca.h>
@@ -235,7 +236,11 @@ static void watchdog_smp_panic(int cpu)
pr_emerg("CPU %d TB:%lld, last SMP heartbeat TB:%lld (%lldms ago)\n",
cpu, tb, last_reset, tb_to_ns(tb - last_reset) / 1000000);
- if (!sysctl_hardlockup_all_cpu_backtrace) {
+ if (sysctl_hardlockup_all_cpu_backtrace ||
+ (hardlockup_si_mask & SYS_INFO_ALL_BT)) {
+ trigger_allbutcpu_cpu_backtrace(cpu);
+ cpumask_clear(&wd_smp_cpus_ipi);
+ } else {
/*
* Try to trigger the stuck CPUs, unless we are going to
* get a backtrace on all of them anyway.
@@ -244,11 +249,9 @@ static void watchdog_smp_panic(int cpu)
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
__cpumask_clear_cpu(c, &wd_smp_cpus_ipi);
}
- } else {
- trigger_allbutcpu_cpu_backtrace(cpu);
- cpumask_clear(&wd_smp_cpus_ipi);
}
+ sys_info(hardlockup_si_mask & ~SYS_INFO_ALL_BT);
if (hardlockup_panic)
nmi_panic(NULL, "Hard LOCKUP");
@@ -415,9 +418,11 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
xchg(&__wd_nmi_output, 1); // see wd_lockup_ipi
- if (sysctl_hardlockup_all_cpu_backtrace)
+ if (sysctl_hardlockup_all_cpu_backtrace ||
+ (hardlockup_si_mask & SYS_INFO_ALL_BT))
trigger_allbutcpu_cpu_backtrace(cpu);
+ sys_info(hardlockup_si_mask & ~SYS_INFO_ALL_BT);
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 5f9457e910e8..37888abee70c 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -1133,10 +1133,6 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
store_args(nr_arg_slots, args_off, ctx);
- /* skip to actual body of traced function */
- if (flags & BPF_TRAMP_F_ORIG_STACK)
- orig_call += RV_FENTRY_NINSNS * 4;
-
if (flags & BPF_TRAMP_F_CALL_ORIG) {
emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx);
ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
@@ -1171,6 +1167,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ /* skip to actual body of traced function */
+ orig_call += RV_FENTRY_NINSNS * 4;
restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx);
restore_stack_args(nr_arg_slots - RV_MAX_REG_ARGS, args_off, stk_arg_off, ctx);
ret = emit_call((const u64)orig_call, true, ctx);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index da233f20ae6f..608983806fd7 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -319,10 +319,29 @@ EXPORT_SYMBOL_FOR_KVM(fpu_enable_guest_xfd_features);
#ifdef CONFIG_X86_64
void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd)
{
+ struct fpstate *fpstate = guest_fpu->fpstate;
+
fpregs_lock();
- guest_fpu->fpstate->xfd = xfd;
- if (guest_fpu->fpstate->in_use)
- xfd_update_state(guest_fpu->fpstate);
+
+ /*
+ * KVM's guest ABI is that setting XFD[i]=1 *can* immediately revert the
+ * save state to its initial configuration. Likewise, KVM_GET_XSAVE does
+ * the same as XSAVE and returns XSTATE_BV[i]=0 whenever XFD[i]=1.
+ *
+ * If the guest's FPU state is in hardware, just update XFD: the XSAVE
+ * in fpu_swap_kvm_fpstate will clear XSTATE_BV[i] whenever XFD[i]=1.
+ *
+ * If however the guest's FPU state is NOT resident in hardware, clear
+ * disabled components in XSTATE_BV now, or a subsequent XRSTOR will
+ * attempt to load disabled components and generate #NM _in the host_.
+ */
+ if (xfd && test_thread_flag(TIF_NEED_FPU_LOAD))
+ fpstate->regs.xsave.header.xfeatures &= ~xfd;
+
+ fpstate->xfd = xfd;
+ if (fpstate->in_use)
+ xfd_update_state(fpstate);
+
fpregs_unlock();
}
EXPORT_SYMBOL_FOR_KVM(fpu_update_guest_xfd);
@@ -431,6 +450,13 @@ int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
return -EINVAL;
/*
+ * Disabled features must be in their initial state, otherwise XRSTOR
+ * causes an exception.
+ */
+ if (WARN_ON_ONCE(ustate->xsave.header.xfeatures & kstate->xfd))
+ return -EINVAL;
+
+ /*
* Nullify @vpkru to preserve its current value if PKRU's bit isn't set
* in the header. KVM's odd ABI is to leave PKRU untouched in this
* case (all other components are eventually re-initialized).
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index df78ddee0abb..37dc8465e0f5 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -89,6 +89,7 @@ struct kvm_task_sleep_node {
struct swait_queue_head wq;
u32 token;
int cpu;
+ bool dummy;
};
static struct kvm_task_sleep_head {
@@ -120,15 +121,26 @@ static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
raw_spin_lock(&b->lock);
e = _find_apf_task(b, token);
if (e) {
- /* dummy entry exist -> wake up was delivered ahead of PF */
- hlist_del(&e->link);
+ struct kvm_task_sleep_node *dummy = NULL;
+
+ /*
+ * The entry can either be a 'dummy' entry (which is put on the
+ * list when wake-up happens ahead of APF handling completion)
+ * or a token from another task which should not be touched.
+ */
+ if (e->dummy) {
+ hlist_del(&e->link);
+ dummy = e;
+ }
+
raw_spin_unlock(&b->lock);
- kfree(e);
+ kfree(dummy);
return false;
}
n->token = token;
n->cpu = smp_processor_id();
+ n->dummy = false;
init_swait_queue_head(&n->wq);
hlist_add_head(&n->link, &b->list);
raw_spin_unlock(&b->lock);
@@ -231,6 +243,7 @@ again:
}
dummy->token = token;
dummy->cpu = smp_processor_id();
+ dummy->dummy = true;
init_swait_queue_head(&dummy->wq);
hlist_add_head(&dummy->link, &b->list);
dummy = NULL;
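
The hunk above tags entries on the shared per-bucket list so a lookup can tell a pre-posted "wake-up arrived first" placeholder from a live waiter owned by another task, which must not be freed. A stripped-down sketch of that disambiguation, with hypothetical names (the real code is in arch/x86/kernel/kvm.c):

#include <linux/list.h>
#include <linux/types.h>

struct sleep_node {
	struct hlist_node link;
	u32 token;
	bool dummy;	/* true: wake-up arrived before the #PF was handled */
};

/* Remove and return a placeholder for @token; never touch live waiters. */
static struct sleep_node *take_placeholder(struct hlist_head *head, u32 token)
{
	struct sleep_node *n;

	hlist_for_each_entry(n, head, link) {
		if (n->token == token && n->dummy) {
			hlist_del(&n->link);
			return n;	/* caller kfree()s it */
		}
	}
	return NULL;
}
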
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ff8812f3a129..63afdb6bb078 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5807,9 +5807,18 @@ static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
+ union fpregs_state *xstate = (union fpregs_state *)guest_xsave->region;
+
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
+ /*
+ * For backwards compatibility, do not expect disabled features to be in
+ * their initial state. XSTATE_BV[i] must still be cleared whenever
+ * XFD[i]=1, or XRSTOR would cause a #NM.
+ */
+ xstate->xsave.header.xfeatures &= ~vcpu->arch.guest_fpu.fpstate->xfd;
+
return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
guest_xsave->region,
kvm_caps.supported_xcr0,
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 6d4d06236f61..cc3c83e4cc23 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -28,6 +28,10 @@ static bool sleep_no_lps0 __read_mostly;
module_param(sleep_no_lps0, bool, 0644);
MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
+static bool check_lps0_constraints __read_mostly;
+module_param(check_lps0_constraints, bool, 0644);
+MODULE_PARM_DESC(check_lps0_constraints, "Check LPS0 device constraints");
+
static const struct acpi_device_id lps0_device_ids[] = {
{"PNP0D80", },
{"", },
@@ -515,7 +519,8 @@ static struct acpi_scan_handler lps0_handler = {
static int acpi_s2idle_begin_lps0(void)
{
- if (pm_debug_messages_on && !lpi_constraints_table) {
+ if (lps0_device_handle && !sleep_no_lps0 && check_lps0_constraints &&
+ !lpi_constraints_table) {
if (acpi_s2idle_vendor_amd())
lpi_device_get_constraints_amd();
else
@@ -539,7 +544,7 @@ static int acpi_s2idle_prepare_late_lps0(void)
if (!lps0_device_handle || sleep_no_lps0)
return 0;
- if (pm_debug_messages_on)
+ if (check_lps0_constraints)
lpi_check_constraints();
/* Screen off */
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 0867115aeef2..c6ae27c982f4 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -67,14 +67,16 @@ struct dev_dax_range {
/**
* struct dev_dax - instance data for a subdivision of a dax region, and
* data while the device is activated in the driver.
- * @region - parent region
- * @dax_dev - core dax functionality
+ * @region: parent region
+ * @dax_dev: core dax functionality
+ * @align: alignment of this instance
* @target_node: effective numa node if dev_dax memory range is onlined
* @dyn_id: is this a dynamic or statically created instance
* @id: ida allocated id when the dax_region is not static
* @ida: mapping id allocator
- * @dev - device core
- * @pgmap - pgmap for memmap setup / lifetime (driver owned)
+ * @dev: device core
+ * @pgmap: pgmap for memmap setup / lifetime (driver owned)
+ * @memmap_on_memory: allow kmem to put the memmap in the memory
* @nr_range: size of @ranges
* @ranges: range tuples of memory used
*/
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 0232bd040f61..bd99802cb0ca 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -162,7 +162,7 @@ int cper_bits_to_str(char *buf, int buf_size, unsigned long bits,
len -= size;
str += size;
}
- return len - buf_size;
+ return buf_size - len;
}
EXPORT_SYMBOL_GPL(cper_bits_to_str);
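
The one-liner above fixes inverted length accounting: cper_bits_to_str() tracks the remaining space in len, so the number of bytes written is buf_size - len, and the old len - buf_size returned its negation. The pattern in isolation, as a hypothetical helper:

#include <linux/kernel.h>

static int fill_example(char *buf, int buf_size)
{
	int len = buf_size;	/* space remaining */

	len -= scnprintf(buf + (buf_size - len), len, "first");
	len -= scnprintf(buf + (buf_size - len), len, "|second");

	return buf_size - len;	/* bytes written, not its negative */
}
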
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 55452e61af31..f5ff6e84a9b7 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -819,6 +819,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
if (tbl) {
phys_initrd_start = tbl->base;
phys_initrd_size = tbl->size;
+ tbl->base = tbl->size = 0;
early_memunmap(tbl, sizeof(*tbl));
}
}
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 538f27209ce7..97780f27ce5b 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -6,6 +6,7 @@
* Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com>
*/
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -109,6 +110,22 @@ davinci_direction_out(struct gpio_chip *chip, unsigned offset, int value)
return __davinci_direction(chip, offset, true, value);
}
+static int davinci_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct davinci_gpio_controller *d = gpiochip_get_data(chip);
+ struct davinci_gpio_regs __iomem *g;
+ u32 mask = __gpio_mask(offset), val;
+ int bank = offset / 32;
+
+ g = d->regs[bank];
+
+ guard(spinlock_irqsave)(&d->lock);
+
+ val = readl_relaxed(&g->dir);
+
+ return (val & mask) ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
/*
* Read the pin's value (works even if it's set up as output);
* returns zero/nonzero.
@@ -203,6 +220,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.get = davinci_gpio_get;
chips->chip.direction_output = davinci_direction_out;
chips->chip.set = davinci_gpio_set;
+ chips->chip.get_direction = davinci_get_direction;
chips->chip.ngpio = ngpio;
chips->chip.base = -1;
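
davinci_get_direction() above relies on the scoped-lock helper from <linux/cleanup.h>: guard(spinlock_irqsave)(&lock) takes the lock and releases it automatically when the enclosing scope ends, so the function can return straight from under it with no explicit unlock. A minimal sketch of the same pattern:

#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/spinlock.h>

static u32 read_reg_locked(spinlock_t *lock, void __iomem *reg)
{
	guard(spinlock_irqsave)(lock);

	return readl_relaxed(reg);	/* lock is dropped after the read */
}
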
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index dcf427d3cf43..fe2d107b0a84 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -468,9 +468,6 @@ int gpiod_get_direction(struct gpio_desc *desc)
test_bit(GPIOD_FLAG_IS_OUT, &flags))
return 0;
- if (!guard.gc->get_direction)
- return -ENOTSUPP;
-
ret = gpiochip_get_direction(guard.gc, offset);
if (ret < 0)
return ret;
diff --git a/drivers/hv/mshv_common.c b/drivers/hv/mshv_common.c
index 58027b23c206..63f09bb5136e 100644
--- a/drivers/hv/mshv_common.c
+++ b/drivers/hv/mshv_common.c
@@ -142,6 +142,7 @@ int hv_call_get_partition_property(u64 partition_id,
}
EXPORT_SYMBOL_GPL(hv_call_get_partition_property);
+#ifdef CONFIG_X86
/*
* Corresponding sleep states have to be initialized in order for a subsequent
* HVCALL_ENTER_SLEEP_STATE call to succeed. Currently only S5 state as per
@@ -237,3 +238,4 @@ void hv_machine_power_off(void)
BUG();
}
+#endif
diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
index 202b9d551e39..30bacba6aec3 100644
--- a/drivers/hv/mshv_regions.c
+++ b/drivers/hv/mshv_regions.c
@@ -58,7 +58,7 @@ static long mshv_region_process_chunk(struct mshv_mem_region *region,
page_order = folio_order(page_folio(page));
/* The hypervisor only supports 4K and 2M page sizes */
- if (page_order && page_order != HPAGE_PMD_ORDER)
+ if (page_order && page_order != PMD_ORDER)
return -EINVAL;
stride = 1 << page_order;
@@ -494,13 +494,6 @@ static bool mshv_region_interval_invalidate(struct mmu_interval_notifier *mni,
unsigned long mstart, mend;
int ret = -EPERM;
- if (mmu_notifier_range_blockable(range))
- mutex_lock(&region->mutex);
- else if (!mutex_trylock(&region->mutex))
- goto out_fail;
-
- mmu_interval_set_seq(mni, cur_seq);
-
mstart = max(range->start, region->start_uaddr);
mend = min(range->end, region->start_uaddr +
(region->nr_pages << HV_HYP_PAGE_SHIFT));
@@ -508,10 +501,17 @@ static bool mshv_region_interval_invalidate(struct mmu_interval_notifier *mni,
page_offset = HVPFN_DOWN(mstart - region->start_uaddr);
page_count = HVPFN_DOWN(mend - mstart);
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&region->mutex);
+ else if (!mutex_trylock(&region->mutex))
+ goto out_fail;
+
+ mmu_interval_set_seq(mni, cur_seq);
+
ret = mshv_region_remap_pages(region, HV_MAP_GPA_NO_ACCESS,
page_offset, page_count);
if (ret)
- goto out_fail;
+ goto out_unlock;
mshv_region_invalidate_pages(region, page_offset, page_count);
@@ -519,6 +519,8 @@ static bool mshv_region_interval_invalidate(struct mmu_interval_notifier *mni,
return true;
+out_unlock:
+ mutex_unlock(&region->mutex);
out_fail:
WARN_ONCE(ret,
"Failed to invalidate region %#llx-%#llx (range %#lx-%#lx, event: %u, pages %#llx-%#llx, mm: %#llx): %d\n",
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index d236aef80a8d..e1e63c2be82b 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -3,6 +3,7 @@
* Helpers for IOMMU drivers implementing SVA
*/
#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>
diff --git a/drivers/media/i2c/ov02c10.c b/drivers/media/i2c/ov02c10.c
index b1e540eb8326..cf93d36032e1 100644
--- a/drivers/media/i2c/ov02c10.c
+++ b/drivers/media/i2c/ov02c10.c
@@ -165,16 +165,12 @@ static const struct reg_sequence sensor_1928x1092_30fps_setting[] = {
{0x3809, 0x88},
{0x380a, 0x04},
{0x380b, 0x44},
- {0x3810, 0x00},
- {0x3811, 0x02},
- {0x3812, 0x00},
- {0x3813, 0x02},
{0x3814, 0x01},
{0x3815, 0x01},
{0x3816, 0x01},
{0x3817, 0x01},
- {0x3820, 0xa0},
+ {0x3820, 0xa8},
{0x3821, 0x00},
{0x3822, 0x80},
{0x3823, 0x08},
@@ -385,8 +381,6 @@ struct ov02c10 {
struct v4l2_ctrl *vblank;
struct v4l2_ctrl *hblank;
struct v4l2_ctrl *exposure;
- struct v4l2_ctrl *hflip;
- struct v4l2_ctrl *vflip;
struct clk *img_clk;
struct gpio_desc *reset;
@@ -465,13 +459,17 @@ static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_HFLIP:
+ cci_write(ov02c10->regmap, OV02C10_ISP_X_WIN_CONTROL,
+ ctrl->val ? 2 : 1, &ret);
cci_update_bits(ov02c10->regmap, OV02C10_ROTATE_CONTROL,
- BIT(3), ov02c10->hflip->val << 3, &ret);
+ BIT(3), ctrl->val ? 0 : BIT(3), &ret);
break;
case V4L2_CID_VFLIP:
+ cci_write(ov02c10->regmap, OV02C10_ISP_Y_WIN_CONTROL,
+ ctrl->val ? 2 : 1, &ret);
cci_update_bits(ov02c10->regmap, OV02C10_ROTATE_CONTROL,
- BIT(4), ov02c10->vflip->val << 4, &ret);
+ BIT(4), ctrl->val << 4, &ret);
break;
default:
@@ -549,15 +547,11 @@ static int ov02c10_init_controls(struct ov02c10 *ov02c10)
OV02C10_EXPOSURE_STEP,
exposure_max);
- ov02c10->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- if (ov02c10->hflip)
- ov02c10->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
- ov02c10->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- if (ov02c10->vflip)
- ov02c10->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov02c10_ctrl_ops,
V4L2_CID_TEST_PATTERN,
diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig
index d9fcddce028b..3f14ca110d06 100644
--- a/drivers/media/pci/intel/Kconfig
+++ b/drivers/media/pci/intel/Kconfig
@@ -6,7 +6,7 @@ source "drivers/media/pci/intel/ivsc/Kconfig"
config IPU_BRIDGE
tristate "Intel IPU Bridge"
- depends on ACPI || COMPILE_TEST
+ depends on ACPI
depends on I2C
help
The IPU bridge is a helper library for Intel IPU drivers to
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index c9827e9fe6ff..b2b710094914 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -5,6 +5,7 @@
#include <acpi/acpi_bus.h>
#include <linux/cleanup.h>
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/mei_cl_bus.h>
#include <linux/platform_device.h>
@@ -98,6 +99,28 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
IPU_SENSOR_CONFIG("XMCC0003", 1, 321468000),
};
+/*
+ * DMI matches for laptops which have their sensor mounted upside-down
+ * without reporting a rotation of 180° in either the SSDB or the _PLD.
+ */
+static const struct dmi_system_id upside_down_sensor_dmi_ids[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 13 9350"),
+ },
+ .driver_data = "OVTI02C1",
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 16 9640"),
+ },
+ .driver_data = "OVTI02C1",
+ },
+ {} /* Terminating entry */
+};
+
static const struct ipu_property_names prop_names = {
.clock_frequency = "clock-frequency",
.rotation = "rotation",
@@ -248,6 +271,12 @@ out_free_buff:
static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
struct ipu_sensor_ssdb *ssdb)
{
+ const struct dmi_system_id *dmi_id;
+
+ dmi_id = dmi_first_match(upside_down_sensor_dmi_ids);
+ if (dmi_id && acpi_dev_hid_match(adev, dmi_id->driver_data))
+ return 180;
+
switch (ssdb->degree) {
case IPU_SENSOR_ROTATION_NORMAL:
return 0;
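
The quirk table above follows the standard DMI match pattern: describe the affected machines once, stash per-machine data in .driver_data, and query with dmi_first_match() at runtime. A generic sketch with hypothetical names:

#include <linux/dmi.h>

static const struct dmi_system_id example_quirks[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Some Model"),
		},
		.driver_data = "QUIRKHID",
	},
	{} /* terminating entry */
};

static const char *example_quirk_hid(void)
{
	const struct dmi_system_id *id = dmi_first_match(example_quirks);

	return id ? id->driver_data : NULL;
}
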
diff --git a/drivers/media/platform/arm/mali-c55/mali-c55-params.c b/drivers/media/platform/arm/mali-c55/mali-c55-params.c
index 082cda4f4f63..be0e909bcf29 100644
--- a/drivers/media/platform/arm/mali-c55/mali-c55-params.c
+++ b/drivers/media/platform/arm/mali-c55/mali-c55-params.c
@@ -582,13 +582,6 @@ static int mali_c55_params_buf_prepare(struct vb2_buffer *vb)
struct mali_c55 *mali_c55 = params->mali_c55;
int ret;
- if (config->version != MALI_C55_PARAM_BUFFER_V1) {
- dev_dbg(mali_c55->dev,
- "Unsupported extensible format version: %u\n",
- config->version);
- return -EINVAL;
- }
-
ret = v4l2_isp_params_validate_buffer_size(mali_c55->dev, vb,
v4l2_isp_params_buffer_size(MALI_C55_PARAMS_MAX_SIZE));
if (ret)
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
index 0fbdae280fdc..6dc4b53607b4 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -96,13 +96,6 @@
#define VSRSTS_RETRIES 20
-#define RZG2L_CSI2_MIN_WIDTH 320
-#define RZG2L_CSI2_MIN_HEIGHT 240
-#define RZG2L_CSI2_MAX_WIDTH 2800
-#define RZG2L_CSI2_MAX_HEIGHT 4095
-
-#define RZG2L_CSI2_DEFAULT_WIDTH RZG2L_CSI2_MIN_WIDTH
-#define RZG2L_CSI2_DEFAULT_HEIGHT RZG2L_CSI2_MIN_HEIGHT
#define RZG2L_CSI2_DEFAULT_FMT MEDIA_BUS_FMT_UYVY8_1X16
enum rzg2l_csi2_pads {
@@ -137,6 +130,10 @@ struct rzg2l_csi2_info {
int (*dphy_enable)(struct rzg2l_csi2 *csi2);
int (*dphy_disable)(struct rzg2l_csi2 *csi2);
bool has_system_clk;
+ unsigned int min_width;
+ unsigned int min_height;
+ unsigned int max_width;
+ unsigned int max_height;
};
struct rzg2l_csi2_timings {
@@ -418,6 +415,10 @@ static const struct rzg2l_csi2_info rzg2l_csi2_info = {
.dphy_enable = rzg2l_csi2_dphy_enable,
.dphy_disable = rzg2l_csi2_dphy_disable,
.has_system_clk = true,
+ .min_width = 320,
+ .min_height = 240,
+ .max_width = 2800,
+ .max_height = 4095,
};
static int rzg2l_csi2_dphy_setting(struct v4l2_subdev *sd, bool on)
@@ -542,6 +543,10 @@ static const struct rzg2l_csi2_info rzv2h_csi2_info = {
.dphy_enable = rzv2h_csi2_dphy_enable,
.dphy_disable = rzv2h_csi2_dphy_disable,
.has_system_clk = false,
+ .min_width = 320,
+ .min_height = 240,
+ .max_width = 4096,
+ .max_height = 4096,
};
static int rzg2l_csi2_mipi_link_setting(struct v4l2_subdev *sd, bool on)
@@ -631,6 +636,7 @@ static int rzg2l_csi2_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *fmt)
{
+ struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
struct v4l2_mbus_framefmt *src_format;
struct v4l2_mbus_framefmt *sink_format;
@@ -653,9 +659,11 @@ static int rzg2l_csi2_set_format(struct v4l2_subdev *sd,
sink_format->ycbcr_enc = fmt->format.ycbcr_enc;
sink_format->quantization = fmt->format.quantization;
sink_format->width = clamp_t(u32, fmt->format.width,
- RZG2L_CSI2_MIN_WIDTH, RZG2L_CSI2_MAX_WIDTH);
+ csi2->info->min_width,
+ csi2->info->max_width);
sink_format->height = clamp_t(u32, fmt->format.height,
- RZG2L_CSI2_MIN_HEIGHT, RZG2L_CSI2_MAX_HEIGHT);
+ csi2->info->min_height,
+ csi2->info->max_height);
fmt->format = *sink_format;
/* propagate format to source pad */
@@ -668,9 +676,10 @@ static int rzg2l_csi2_init_state(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { .pad = RZG2L_CSI2_SINK, };
+ struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
- fmt.format.width = RZG2L_CSI2_DEFAULT_WIDTH;
- fmt.format.height = RZG2L_CSI2_DEFAULT_HEIGHT;
+ fmt.format.width = csi2->info->min_width;
+ fmt.format.height = csi2->info->min_height;
fmt.format.field = V4L2_FIELD_NONE;
fmt.format.code = RZG2L_CSI2_DEFAULT_FMT;
fmt.format.colorspace = V4L2_COLORSPACE_SRGB;
@@ -697,16 +706,18 @@ static int rzg2l_csi2_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
+
if (fse->index != 0)
return -EINVAL;
if (!rzg2l_csi2_code_to_fmt(fse->code))
return -EINVAL;
- fse->min_width = RZG2L_CSI2_MIN_WIDTH;
- fse->min_height = RZG2L_CSI2_MIN_HEIGHT;
- fse->max_width = RZG2L_CSI2_MAX_WIDTH;
- fse->max_height = RZG2L_CSI2_MAX_HEIGHT;
+ fse->min_width = csi2->info->min_width;
+ fse->min_height = csi2->info->min_height;
+ fse->max_width = csi2->info->max_width;
+ fse->max_height = csi2->info->max_height;
return 0;
}
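
The RZ/G2L CSI-2 change above moves fixed #define limits into the per-SoC info structure, the usual match-data pattern: each compatible selects a struct describing its variant. A condensed sketch with hypothetical names:

#include <linux/mod_devicetable.h>
#include <linux/of_device.h>

struct variant_info {
	unsigned int min_width, max_width;
};

static const struct variant_info variant_a = { .min_width = 320, .max_width = 2800 };
static const struct variant_info variant_b = { .min_width = 320, .max_width = 4096 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,variant-a", .data = &variant_a },
	{ .compatible = "vendor,variant-b", .data = &variant_b },
	{ /* sentinel */ }
};

/* In probe: const struct variant_info *info = of_device_get_match_data(dev); */
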
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index cfaea6178a71..e15e320db476 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CAN_DEV
- bool "CAN Device Drivers"
+ tristate "CAN Device Drivers"
default y
depends on CAN
help
@@ -17,7 +17,10 @@ menuconfig CAN_DEV
virtual ones. If you own such devices or plan to use the virtual CAN
interfaces to develop applications, say Y here.
-if CAN_DEV && CAN
+ To compile as a module, choose M here: the module will be called
+ can-dev.
+
+if CAN_DEV
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 37e2f1a2faec..d7bc10a6b8ea 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan/
-obj-$(CONFIG_CAN_DEV) += dev/
+obj-y += dev/
obj-y += esd/
obj-y += rcar/
obj-y += rockchip/
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 1e6b9e3dc2fe..0ea1ff28dfce 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -310,7 +310,7 @@ static int ctucan_set_secondary_sample_point(struct net_device *ndev)
}
ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset);
- ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1);
+ ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x0);
}
ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg);
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
index 64226acf0f3d..633687d6b6c0 100644
--- a/drivers/net/can/dev/Makefile
+++ b/drivers/net/can/dev/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CAN) += can-dev.o
+obj-$(CONFIG_CAN_DEV) += can-dev.o
+
+can-dev-y += skb.o
-can-dev-$(CONFIG_CAN_DEV) += skb.o
can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o
can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o
can-dev-$(CONFIG_CAN_NETLINK) += dev.o
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 091f30e94c61..7ab9578f5b89 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -375,6 +375,32 @@ void can_set_default_mtu(struct net_device *dev)
}
}
+void can_set_cap_info(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ u32 can_cap;
+
+ if (can_dev_in_xl_only_mode(priv)) {
+ /* XL only mode => no CC/FD capability */
+ can_cap = CAN_CAP_XL;
+ } else {
+ /* mixed mode => CC + FD/XL capability */
+ can_cap = CAN_CAP_CC;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_FD)
+ can_cap |= CAN_CAP_FD;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_XL)
+ can_cap |= CAN_CAP_XL;
+ }
+
+ if (priv->ctrlmode & (CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_RESTRICTED))
+ can_cap |= CAN_CAP_RO;
+
+ can_set_cap(dev, can_cap);
+}
+
/* helper to define static CAN controller features at device creation time */
int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
{
@@ -390,6 +416,7 @@ int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
/* override MTU which was set by default in can_setup()? */
can_set_default_mtu(dev);
+ can_set_cap_info(dev);
return 0;
}
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index d6b0e686fb11..0498198a4696 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -377,6 +377,7 @@ static int can_ctrlmode_changelink(struct net_device *dev,
}
can_set_default_mtu(dev);
+ can_set_cap_info(dev);
return 0;
}
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index f799233c2b72..2d248deb69dc 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -1736,7 +1736,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n",
__func__, i, rx_buf_len);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index a0233e550a5a..d093babbc320 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -751,6 +751,8 @@ resubmit_urb:
hf, parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
+ usb_anchor_urb(urb, &parent->rx_submitted);
+
rc = usb_submit_urb(urb, GFP_ATOMIC);
/* USB failure take down all interfaces */
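
The gs_usb fix above anchors the URB before resubmitting it, so a disconnect can kill it through the anchor even if submission races with teardown. The general discipline, sketched (on submit failure the URB must be unanchored again):

#include <linux/usb.h>

static int submit_anchored(struct urb *urb, struct usb_anchor *anchor)
{
	int rc;

	usb_anchor_urb(urb, anchor);
	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (rc)
		usb_unanchor_urb(urb);	/* undo on failure */

	return rc;
}
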
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index fdc662aea279..76e6b7b5c6a1 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -130,6 +130,19 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static void vcan_set_cap_info(struct net_device *dev)
+{
+ u32 can_cap = CAN_CAP_CC;
+
+ if (dev->mtu > CAN_MTU)
+ can_cap |= CAN_CAP_FD;
+
+ if (dev->mtu >= CANXL_MIN_MTU)
+ can_cap |= CAN_CAP_XL;
+
+ can_set_cap(dev, can_cap);
+}
+
static int vcan_change_mtu(struct net_device *dev, int new_mtu)
{
/* Do not allow changing the MTU while running */
@@ -141,6 +154,7 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
WRITE_ONCE(dev->mtu, new_mtu);
+ vcan_set_cap_info(dev);
return 0;
}
@@ -162,6 +176,7 @@ static void vcan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->flags = IFF_NOARP;
can_set_ml_priv(dev, netdev_priv(dev));
+ vcan_set_cap_info(dev);
/* set flags according to driver capabilities */
if (echo)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index b2c19f8c5f8e..f14c6f02b662 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -125,6 +125,19 @@ static int vxcan_get_iflink(const struct net_device *dev)
return iflink;
}
+static void vxcan_set_cap_info(struct net_device *dev)
+{
+ u32 can_cap = CAN_CAP_CC;
+
+ if (dev->mtu > CAN_MTU)
+ can_cap |= CAN_CAP_FD;
+
+ if (dev->mtu >= CANXL_MIN_MTU)
+ can_cap |= CAN_CAP_XL;
+
+ can_set_cap(dev, can_cap);
+}
+
static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
/* Do not allow changing the MTU while running */
@@ -136,6 +149,7 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
WRITE_ONCE(dev->mtu, new_mtu);
+ vxcan_set_cap_info(dev);
return 0;
}
@@ -167,6 +181,7 @@ static void vxcan_setup(struct net_device *dev)
can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
can_set_ml_priv(dev, can_ml);
+ vxcan_set_cap_info(dev);
}
/* forward declaration for rtnl_create_link() */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index 420c3f4cf741..1d9760b4b8f4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -218,7 +218,7 @@ static int octep_vf_request_irqs(struct octep_vf_device *oct)
ioq_irq_err:
while (i) {
--i;
- free_irq(oct->msix_entries[i].vector, oct);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
}
return -1;
}
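
The octep_vf fix above restores the request_irq()/free_irq() contract: free_irq() must receive the same dev_id cookie that request_irq() registered, here the per-queue vector rather than the parent device. Sketched with hypothetical types and names:

#include <linux/interrupt.h>

struct queue_vector;

static int setup_queue_irq(unsigned int vector, struct queue_vector *qv,
			   irq_handler_t handler)
{
	return request_irq(vector, handler, 0, "example-ioq", qv);
}

static void teardown_queue_irq(unsigned int vector, struct queue_vector *qv)
{
	free_irq(vector, qv);	/* same cookie, not the parent device */
}
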
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 262dc032e276..ff4ab4691baf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -962,7 +962,7 @@ struct mlx5e_priv {
};
struct mlx5e_dev {
- struct mlx5e_priv *priv;
+ struct net_device *netdev;
struct devlink_port dl_port;
};
@@ -1242,10 +1242,13 @@ struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
-void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
-int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
- const struct mlx5e_profile *new_profile, void *new_ppriv);
-void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
+void mlx5e_destroy_netdev(struct net_device *netdev);
+int mlx5e_netdev_change_profile(struct net_device *netdev,
+ struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *new_profile,
+ void *new_ppriv);
+void mlx5e_netdev_attach_nic_profile(struct net_device *netdev,
+ struct mlx5_core_dev *mdev);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 07fc4d2c8fad..9042c8a388e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -6325,6 +6325,7 @@ err_free_cpumask:
void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
{
+ bool destroying = test_bit(MLX5E_STATE_DESTROYING, &priv->state);
int i;
/* bail if change profile failed and also rollback failed */
@@ -6352,6 +6353,8 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
}
memset(priv, 0, sizeof(*priv));
+ if (destroying) /* restore destroying bit, to allow unload */
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
}
static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
@@ -6584,19 +6587,28 @@ profile_cleanup:
return err;
}
-int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
- const struct mlx5e_profile *new_profile, void *new_ppriv)
+int mlx5e_netdev_change_profile(struct net_device *netdev,
+ struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *new_profile,
+ void *new_ppriv)
{
- const struct mlx5e_profile *orig_profile = priv->profile;
- struct net_device *netdev = priv->netdev;
- struct mlx5_core_dev *mdev = priv->mdev;
- void *orig_ppriv = priv->ppriv;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ const struct mlx5e_profile *orig_profile;
int err, rollback_err;
+ void *orig_ppriv;
- /* cleanup old profile */
- mlx5e_detach_netdev(priv);
- priv->profile->cleanup(priv);
- mlx5e_priv_cleanup(priv);
+ orig_profile = priv->profile;
+ orig_ppriv = priv->ppriv;
+
+ /* NULL could happen if previous change_profile failed to rollback */
+ if (priv->profile) {
+ WARN_ON_ONCE(priv->mdev != mdev);
+ /* cleanup old profile */
+ mlx5e_detach_netdev(priv);
+ priv->profile->cleanup(priv);
+ mlx5e_priv_cleanup(priv);
+ }
+ /* priv members are not valid from this point ... */
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
@@ -6613,23 +6625,33 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
return 0;
rollback:
+ if (!orig_profile) {
+ netdev_warn(netdev, "no original profile to rollback to\n");
+ priv->profile = NULL;
+ return err;
+ }
+
rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
- if (rollback_err)
- netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
- __func__, rollback_err);
+ if (rollback_err) {
+ netdev_err(netdev, "failed to rollback to orig profile, %d\n",
+ rollback_err);
+ priv->profile = NULL;
+ }
return err;
}
-void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv)
+void mlx5e_netdev_attach_nic_profile(struct net_device *netdev,
+ struct mlx5_core_dev *mdev)
{
- mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL);
+ mlx5e_netdev_change_profile(netdev, mdev, &mlx5e_nic_profile, NULL);
}
-void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
+void mlx5e_destroy_netdev(struct net_device *netdev)
{
- struct net_device *netdev = priv->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5e_priv_cleanup(priv);
+ if (priv->profile)
+ mlx5e_priv_cleanup(priv);
free_netdev(netdev);
}
@@ -6637,8 +6659,8 @@ static int _mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
- struct mlx5e_priv *priv = mlx5e_dev->priv;
- struct net_device *netdev = priv->netdev;
+ struct mlx5e_priv *priv = netdev_priv(mlx5e_dev->netdev);
+ struct net_device *netdev = mlx5e_dev->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5_core_dev *pos, *to;
int err, i;
@@ -6684,10 +6706,11 @@ static int mlx5e_resume(struct auxiliary_device *adev)
static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
- struct mlx5e_priv *priv = mlx5e_dev->priv;
- struct net_device *netdev = priv->netdev;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_priv *priv = netdev_priv(mlx5e_dev->netdev);
+ struct net_device *netdev = mlx5e_dev->netdev;
+ struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5_core_dev *pos;
int i;
@@ -6748,11 +6771,11 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
goto err_devlink_port_unregister;
}
SET_NETDEV_DEVLINK_PORT(netdev, &mlx5e_dev->dl_port);
+ mlx5e_dev->netdev = netdev;
mlx5e_build_nic_netdev(netdev);
priv = netdev_priv(netdev);
- mlx5e_dev->priv = priv;
priv->profile = profile;
priv->ppriv = NULL;
@@ -6785,7 +6808,7 @@ err_resume:
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
- mlx5e_destroy_netdev(priv);
+ mlx5e_destroy_netdev(netdev);
err_devlink_port_unregister:
mlx5e_devlink_port_unregister(mlx5e_dev);
err_devlink_unregister:
@@ -6815,17 +6838,20 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
- struct mlx5e_priv *priv = mlx5e_dev->priv;
+ struct net_device *netdev = mlx5e_dev->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = edev->mdev;
mlx5_core_uplink_netdev_set(mdev, NULL);
- mlx5e_dcbnl_delete_app(priv);
+
+ if (priv->profile)
+ mlx5e_dcbnl_delete_app(priv);
/* When unload driver, the netdev is in registered state
* if it's from legacy mode. If from switchdev mode, it
* is already unregistered before changing to NIC profile.
*/
- if (priv->netdev->reg_state == NETREG_REGISTERED) {
- unregister_netdev(priv->netdev);
+ if (netdev->reg_state == NETREG_REGISTERED) {
+ unregister_netdev(netdev);
_mlx5e_suspend(adev, false);
} else {
struct mlx5_core_dev *pos;
@@ -6840,7 +6866,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
/* Avoid cleanup if profile rollback failed. */
if (priv->profile)
priv->profile->cleanup(priv);
- mlx5e_destroy_netdev(priv);
+ mlx5e_destroy_netdev(netdev);
mlx5e_devlink_port_unregister(mlx5e_dev);
mlx5e_destroy_devlink(mlx5e_dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index ee9595109649..6eec88fa6d10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1508,17 +1508,16 @@ mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *
{
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev;
- struct mlx5e_priv *priv;
int err;
netdev = mlx5_uplink_netdev_get(dev);
if (!netdev)
return 0;
- priv = netdev_priv(netdev);
- rpriv->netdev = priv->netdev;
- err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
- rpriv);
+ /* must not use netdev_priv(netdev); it might not be initialized yet */
+ rpriv->netdev = netdev;
+ err = mlx5e_netdev_change_profile(netdev, dev,
+ &mlx5e_uplink_rep_profile, rpriv);
mlx5_uplink_netdev_put(dev, netdev);
return err;
}
@@ -1546,7 +1545,7 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
unregister_netdev(netdev);
- mlx5e_netdev_attach_nic_profile(priv);
+ mlx5e_netdev_attach_nic_profile(netdev, priv->mdev);
}
static int
@@ -1612,7 +1611,7 @@ err_cleanup_profile:
priv->profile->cleanup(priv);
err_destroy_netdev:
- mlx5e_destroy_netdev(netdev_priv(netdev));
+ mlx5e_destroy_netdev(netdev);
return err;
}
@@ -1667,7 +1666,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
mlx5e_rep_vnic_reporter_destroy(priv);
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
- mlx5e_destroy_netdev(priv);
+ mlx5e_destroy_netdev(netdev);
free_ppriv:
kvfree(ppriv); /* mlx5e_rep_priv */
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3d47d749ef9f..cbd52cb79268 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1750,6 +1750,9 @@ static int netvsc_set_rxfh(struct net_device *dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ if (!ndc->rx_table_sz)
+ return -EOPNOTSUPP;
+
rndis_dev = ndev->extension;
if (rxfh->indir) {
for (i = 0; i < ndc->rx_table_sz; i++)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 7966545512cf..b4df7e184791 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -59,7 +59,7 @@ struct macvlan_port {
struct macvlan_source_entry {
struct hlist_node hlist;
- struct macvlan_dev *vlan;
+ struct macvlan_dev __rcu *vlan;
unsigned char addr[6+2] __aligned(sizeof(u16));
struct rcu_head rcu;
};
@@ -146,7 +146,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source(
hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
if (ether_addr_equal_64bits(entry->addr, addr) &&
- entry->vlan == vlan)
+ rcu_access_pointer(entry->vlan) == vlan)
return entry;
}
return NULL;
@@ -168,7 +168,7 @@ static int macvlan_hash_add_source(struct macvlan_dev *vlan,
return -ENOMEM;
ether_addr_copy(entry->addr, addr);
- entry->vlan = vlan;
+ RCU_INIT_POINTER(entry->vlan, vlan);
h = &port->vlan_source_hash[macvlan_eth_hash(addr)];
hlist_add_head_rcu(&entry->hlist, h);
vlan->macaddr_count++;
@@ -187,6 +187,7 @@ static void macvlan_hash_add(struct macvlan_dev *vlan)
static void macvlan_hash_del_source(struct macvlan_source_entry *entry)
{
+ RCU_INIT_POINTER(entry->vlan, NULL);
hlist_del_rcu(&entry->hlist);
kfree_rcu(entry, rcu);
}
@@ -390,7 +391,7 @@ static void macvlan_flush_sources(struct macvlan_port *port,
int i;
hash_for_each_safe(port->vlan_source_hash, i, next, entry, hlist)
- if (entry->vlan == vlan)
+ if (rcu_access_pointer(entry->vlan) == vlan)
macvlan_hash_del_source(entry);
vlan->macaddr_count = 0;
@@ -433,9 +434,14 @@ static bool macvlan_forward_source(struct sk_buff *skb,
hlist_for_each_entry_rcu(entry, h, hlist) {
if (ether_addr_equal_64bits(entry->addr, addr)) {
- if (entry->vlan->flags & MACVLAN_FLAG_NODST)
+ struct macvlan_dev *vlan = rcu_dereference(entry->vlan);
+
+ if (!vlan)
+ continue;
+
+ if (vlan->flags & MACVLAN_FLAG_NODST)
consume = true;
- macvlan_forward_source_one(skb, entry->vlan);
+ macvlan_forward_source_one(skb, vlan);
}
}
@@ -1680,7 +1686,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb,
struct macvlan_source_entry *entry;
hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
- if (entry->vlan != vlan)
+ if (rcu_access_pointer(entry->vlan) != vlan)
continue;
if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
return 1;
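
The macvlan changes above put the source-entry back-pointer under the __rcu discipline: writers publish and retire with RCU_INIT_POINTER(), and lock-free readers use rcu_dereference() and must tolerate NULL once an entry is being torn down. The core of the pattern, sketched with hypothetical types:

#include <linux/rcupdate.h>

struct foo;

struct entry {
	struct foo __rcu *owner;
};

static void entry_retire(struct entry *e)
{
	RCU_INIT_POINTER(e->owner, NULL);	/* readers now observe NULL */
}

/* Caller must hold rcu_read_lock(). */
static struct foo *entry_owner(struct entry *e)
{
	return rcu_dereference(e->owner);
}
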
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 89b5b19a9bd2..42d46b5758fc 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -1741,10 +1741,10 @@ static int yt8521_led_hw_control_set(struct phy_device *phydev, u8 index,
val |= YT8521_LED_1000_ON_EN;
if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
- val |= YT8521_LED_HDX_ON_EN;
+ val |= YT8521_LED_FDX_ON_EN;
if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
- val |= YT8521_LED_FDX_ON_EN;
+ val |= YT8521_LED_HDX_ON_EN;
if (test_bit(TRIGGER_NETDEV_TX, &rules) ||
test_bit(TRIGGER_NETDEV_RX, &rules))
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 22d894101c01..db88dcaefb20 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -425,9 +425,6 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
- struct virtio_net_rss_config_hdr *rss_hdr;
- struct virtio_net_rss_config_trailer rss_trailer;
- u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
/* Has control virtqueue */
bool has_cvq;
@@ -441,9 +438,6 @@ struct virtnet_info {
/* Packet virtio header size */
u8 hdr_len;
- /* Work struct for delayed refilling if we run low on memory. */
- struct delayed_work refill;
-
/* UDP tunnel support */
bool tx_tnl;
@@ -451,12 +445,6 @@ struct virtnet_info {
bool rx_tnl_csum;
- /* Is delayed refill enabled? */
- bool refill_enabled;
-
- /* The lock to synchronize the access to refill_enabled */
- spinlock_t refill_lock;
-
/* Work struct for config space updates */
struct work_struct config_work;
@@ -493,7 +481,16 @@ struct virtnet_info {
struct failover *failover;
u64 device_stats_cap;
+
+ struct virtio_net_rss_config_hdr *rss_hdr;
+
+ /* Must be last as it ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct virtio_net_rss_config_trailer, rss_trailer, hash_key_data,
+ u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+ );
};
+static_assert(offsetof(struct virtnet_info, rss_trailer.hash_key_data) ==
+ offsetof(struct virtnet_info, rss_hash_key_data));
struct padded_vnet_hdr {
struct virtio_net_hdr_v1_hash hdr;
@@ -720,20 +717,6 @@ static void virtnet_rq_free_buf(struct virtnet_info *vi,
put_page(virt_to_head_page(buf));
}
-static void enable_delayed_refill(struct virtnet_info *vi)
-{
- spin_lock_bh(&vi->refill_lock);
- vi->refill_enabled = true;
- spin_unlock_bh(&vi->refill_lock);
-}
-
-static void disable_delayed_refill(struct virtnet_info *vi)
-{
- spin_lock_bh(&vi->refill_lock);
- vi->refill_enabled = false;
- spin_unlock_bh(&vi->refill_lock);
-}
-
static void enable_rx_mode_work(struct virtnet_info *vi)
{
rtnl_lock();
@@ -2948,42 +2931,6 @@ static void virtnet_napi_disable(struct receive_queue *rq)
napi_disable(napi);
}
-static void refill_work(struct work_struct *work)
-{
- struct virtnet_info *vi =
- container_of(work, struct virtnet_info, refill.work);
- bool still_empty;
- int i;
-
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- struct receive_queue *rq = &vi->rq[i];
-
- /*
- * When queue API support is added in the future and the call
- * below becomes napi_disable_locked, this driver will need to
- * be refactored.
- *
- * One possible solution would be to:
- * - cancel refill_work with cancel_delayed_work (note:
- * non-sync)
- * - cancel refill_work with cancel_delayed_work_sync in
- * virtnet_remove after the netdev is unregistered
- * - wrap all of the work in a lock (perhaps the netdev
- * instance lock)
- * - check netif_running() and return early to avoid a race
- */
- napi_disable(&rq->napi);
- still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_do_enable(rq->vq, &rq->napi);
-
- /* In theory, this can happen: if we don't get any buffers in
- * we will *never* try to fill again.
- */
- if (still_empty)
- schedule_delayed_work(&vi->refill, HZ/2);
- }
-}
-
static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
struct receive_queue *rq,
int budget,
@@ -3046,16 +2993,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
else
packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
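+	/* Record the real packet count now; "packets" may be overwritten
+	 * with the budget below so that this queue gets polled again.
+	 */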
+ u64_stats_set(&stats.packets, packets);
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
- if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
- spin_lock(&vi->refill_lock);
- if (vi->refill_enabled)
- schedule_delayed_work(&vi->refill, 0);
- spin_unlock(&vi->refill_lock);
- }
+ if (!try_fill_recv(vi, rq, GFP_ATOMIC))
+			/* We need to retry refilling in the next NAPI poll,
+			 * so return the full budget to make sure this queue
+			 * is polled again.
+			 */
+ packets = budget;
}
- u64_stats_set(&stats.packets, packets);
u64_stats_update_begin(&rq->stats.syncp);
for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
size_t offset = virtnet_rq_stats_desc[i].offset;
@@ -3226,13 +3173,12 @@ static int virtnet_open(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i, err;
- enable_delayed_refill(vi);
-
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
- /* Make sure we have some buffers: if oom use wq. */
- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
+			/* Pre-fill rq aggressively to make sure we are ready
+			 * to get packets immediately.
+			 */
+ try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
err = virtnet_enable_queue_pair(vi, i);
if (err < 0)
@@ -3251,9 +3197,6 @@ static int virtnet_open(struct net_device *dev)
return 0;
err_enable_qp:
- disable_delayed_refill(vi);
- cancel_delayed_work_sync(&vi->refill);
-
for (i--; i >= 0; i--) {
virtnet_disable_queue_pair(vi, i);
virtnet_cancel_dim(vi, &vi->rq[i].dim);
@@ -3432,8 +3375,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void __virtnet_rx_pause(struct virtnet_info *vi,
- struct receive_queue *rq)
+static void virtnet_rx_pause(struct virtnet_info *vi,
+ struct receive_queue *rq)
{
bool running = netif_running(vi->dev);
@@ -3447,62 +3390,37 @@ static void virtnet_rx_pause_all(struct virtnet_info *vi)
{
int i;
- /*
- * Make sure refill_work does not run concurrently to
- * avoid napi_disable race which leads to deadlock.
- */
- disable_delayed_refill(vi);
- cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++)
- __virtnet_rx_pause(vi, &vi->rq[i]);
+ virtnet_rx_pause(vi, &vi->rq[i]);
}
-static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+static void virtnet_rx_resume(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ bool refill)
{
- /*
- * Make sure refill_work does not run concurrently to
- * avoid napi_disable race which leads to deadlock.
- */
- disable_delayed_refill(vi);
- cancel_delayed_work_sync(&vi->refill);
- __virtnet_rx_pause(vi, rq);
-}
-
-static void __virtnet_rx_resume(struct virtnet_info *vi,
- struct receive_queue *rq,
- bool refill)
-{
- bool running = netif_running(vi->dev);
- bool schedule_refill = false;
+ if (netif_running(vi->dev)) {
+		/* Pre-fill rq aggressively to make sure we are ready to get
+		 * packets immediately.
+		 */
+ if (refill)
+ try_fill_recv(vi, rq, GFP_KERNEL);
- if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
- schedule_refill = true;
- if (running)
virtnet_napi_enable(rq);
-
- if (schedule_refill)
- schedule_delayed_work(&vi->refill, 0);
+ }
}
static void virtnet_rx_resume_all(struct virtnet_info *vi)
{
int i;
- enable_delayed_refill(vi);
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
- __virtnet_rx_resume(vi, &vi->rq[i], true);
+ virtnet_rx_resume(vi, &vi->rq[i], true);
else
- __virtnet_rx_resume(vi, &vi->rq[i], false);
+ virtnet_rx_resume(vi, &vi->rq[i], false);
}
}
-static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
-{
- enable_delayed_refill(vi);
- __virtnet_rx_resume(vi, rq, true);
-}
-
static int virtnet_rx_resize(struct virtnet_info *vi,
struct receive_queue *rq, u32 ring_num)
{
@@ -3516,7 +3434,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
if (err)
netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
- virtnet_rx_resume(vi, rq);
+ virtnet_rx_resume(vi, rq, true);
return err;
}
@@ -3829,11 +3747,12 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
}
succ:
vi->curr_queue_pairs = queue_pairs;
- /* virtnet_open() will refill when device is going to up. */
- spin_lock_bh(&vi->refill_lock);
- if (dev->flags & IFF_UP && vi->refill_enabled)
- schedule_delayed_work(&vi->refill, 0);
- spin_unlock_bh(&vi->refill_lock);
+ if (dev->flags & IFF_UP) {
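+		/* The delayed refill work is gone: kick NAPI so each
+		 * active queue refills its ring from the poll loop.
+		 */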
+ local_bh_disable();
+ for (int i = 0; i < vi->curr_queue_pairs; ++i)
+ virtqueue_napi_schedule(&vi->rq[i].napi, vi->rq[i].vq);
+ local_bh_enable();
+ }
return 0;
}
@@ -3843,10 +3762,6 @@ static int virtnet_close(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i;
- /* Make sure NAPI doesn't schedule refill work */
- disable_delayed_refill(vi);
- /* Make sure refill_work doesn't re-enable napi! */
- cancel_delayed_work_sync(&vi->refill);
/* Prevent the config change callback from changing carrier
* after close
*/
@@ -5802,7 +5717,6 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
- enable_delayed_refill(vi);
enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
@@ -5892,7 +5806,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
rq->xsk_pool = pool;
- virtnet_rx_resume(vi, rq);
+ virtnet_rx_resume(vi, rq, true);
if (pool)
return 0;
@@ -6559,7 +6473,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
if (!vi->rq)
goto err_rq;
- INIT_DELAYED_WORK(&vi->refill, refill_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].pages = NULL;
netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
@@ -6901,7 +6814,6 @@ static int virtnet_probe(struct virtio_device *vdev)
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
- spin_lock_init(&vi->refill_lock);
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
vi->mergeable_rx_bufs = true;
@@ -7165,7 +7077,6 @@ free_failover:
net_failover_destroy(vi->failover);
free_vqs:
virtio_reset_device(vdev);
- cancel_delayed_work_sync(&vi->refill);
free_receive_page_frags(vi);
virtnet_del_vqs(vi);
free:
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index e52ce9b01f49..9b57312f43f5 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -1169,7 +1169,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
* This function should be used only if there is any requirement
* to check for FOS version below 6.3.
* To check if the attached fabric is a brocade fabric, use
- * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3
+ * fabric->lps->brcd_switch which works for FOS versions 6.3
* or above only.
*/
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index f869108fd969..eebca96c1fc1 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1063,6 +1063,9 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
{
struct scsi_device *sdev = scmd->device;
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct request *rq = scsi_cmd_to_rq(scmd);
+#endif
/*
* We need saved copies of a number of fields - this is because
@@ -1115,6 +1118,18 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
(sdev->lun << 5 & 0xe0);
/*
+ * Encryption must be disabled for the commands submitted by the error handler.
+ * Hence, clear the encryption context information.
+ */
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ ses->rq_crypt_keyslot = rq->crypt_keyslot;
+ ses->rq_crypt_ctx = rq->crypt_ctx;
+
+ rq->crypt_keyslot = NULL;
+ rq->crypt_ctx = NULL;
+#endif
+
+ /*
* Zero the sense buffer. The scsi spec mandates that any
* untransferred sense data should be interpreted as being zero.
*/
@@ -1131,6 +1146,10 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
*/
void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
{
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct request *rq = scsi_cmd_to_rq(scmd);
+#endif
+
/*
* Restore original data
*/
@@ -1143,6 +1162,11 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
scmd->underflow = ses->underflow;
scmd->prot_op = ses->prot_op;
scmd->eh_eflags = ses->eh_eflags;
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ rq->crypt_keyslot = ses->rq_crypt_keyslot;
+ rq->crypt_ctx = ses->rq_crypt_ctx;
+#endif
}
EXPORT_SYMBOL(scsi_eh_restore_cmnd);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 93031326ac3e..c7d6b76c86d2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2459,7 +2459,7 @@ EXPORT_SYMBOL(scsi_mode_sense);
* @retries: number of retries before failing
 * @sshdr: output pointer for decoded sense information.
*
- * Returns zero if unsuccessful or an error if TUR failed. For
+ * Returns zero if successful or an error if TUR failed. For
* removable media, UNIT_ATTENTION sets ->changed flag.
**/
int
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 3d4d00188c26..d933cebad52b 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -23,6 +23,7 @@ const struct device_type sdw_slave_type = {
.release = sdw_slave_release,
.uevent = sdw_slave_uevent,
};
+EXPORT_SYMBOL_GPL(sdw_slave_type);
int sdw_slave_add(struct sdw_bus *bus,
struct sdw_slave_id *id, struct fwnode_handle *fwnode)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 0babb7035200..604043a7533d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -10736,9 +10736,7 @@ static int ufshcd_add_scsi_host(struct ufs_hba *hba)
if (is_mcq_supported(hba)) {
ufshcd_mcq_enable(hba);
err = ufshcd_alloc_mcq(hba);
- if (!err) {
- ufshcd_config_mcq(hba);
- } else {
+ if (err) {
/* Continue with SDB mode */
ufshcd_mcq_disable(hba);
use_mcq_mode = false;
@@ -11011,6 +11009,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
if (err)
goto out_disable;
+ if (hba->mcq_enabled)
+ ufshcd_config_mcq(hba);
+
if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
goto initialized;
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index ecbbf52bf734..66b11cc0703b 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1112,7 +1112,7 @@ static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
unsigned long flags;
u32 ah_ms = 10;
u32 ah_scale, ah_timer;
- u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000};
+ static const u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000};
if (ufshcd_is_clkgating_allowed(hba)) {
if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) {
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 97ebe457c00a..d27a0b1080a9 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -484,7 +484,7 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
bio_clone_blkg_association(new, prev);
new->bi_iter.bi_sector = bio_end_sector(prev);
- bio_chain(prev, new);
+ bio_chain(new, prev);
submit_bio(prev);
return new;
}
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index ab76120705e2..134d7f760a33 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -417,8 +417,10 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
d->map = bl_map_simple;
d->pr_key = v->scsi.pr_key;
- if (d->len == 0)
- return -ENODEV;
+ if (d->len == 0) {
+ error = -ENODEV;
+ goto out_blkdev_put;
+ }
ops = bdev->bd_disk->fops->pr_ops;
if (!ops) {
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 2248e3ad089a..8a3857a49d84 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -149,7 +149,7 @@ static int nfs4_do_check_delegation(struct inode *inode, fmode_t type,
int nfs4_have_delegation(struct inode *inode, fmode_t type, int flags)
{
if (S_ISDIR(inode->i_mode) && !directory_delegations)
- nfs_inode_evict_delegation(inode);
+ nfs4_inode_set_return_delegation_on_close(inode);
return nfs4_do_check_delegation(inode, type, flags, true);
}
@@ -581,6 +581,10 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
if (delegation == NULL)
return 0;
+ /* Directory delegations don't require any state recovery */
+ if (!S_ISREG(inode->i_mode))
+ goto out_return;
+
if (!issync)
mode |= O_NONBLOCK;
/* Recall of any remaining application leases */
@@ -604,6 +608,7 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
goto out;
}
+out_return:
err = nfs_do_return_delegation(inode, delegation, issync);
out:
/* Refcount matched in nfs_start_delegation_return_locked() */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 23a78a742b61..8f9ea79b7882 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1440,7 +1440,8 @@ static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf)
if (!dir || !nfs_verify_change_attribute(dir, verf))
return;
- if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ, 0))
+ if (NFS_PROTO(dir)->have_delegation(dir, FMODE_READ, 0) ||
+ (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ, 0)))
nfs_set_verifier_delegated(&verf);
dentry->d_time = verf;
}
@@ -1465,6 +1466,49 @@ void nfs_set_verifier(struct dentry *dentry, unsigned long verf)
EXPORT_SYMBOL_GPL(nfs_set_verifier);
#if IS_ENABLED(CONFIG_NFS_V4)
+static void nfs_clear_verifier_file(struct inode *inode)
+{
+ struct dentry *alias;
+ struct inode *dir;
+
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
+ spin_lock(&alias->d_lock);
+ dir = d_inode_rcu(alias->d_parent);
+ if (!dir ||
+ !NFS_PROTO(dir)->have_delegation(dir, FMODE_READ, 0))
+ nfs_unset_verifier_delegated(&alias->d_time);
+ spin_unlock(&alias->d_lock);
+ }
+}
+
+static void nfs_clear_verifier_directory(struct inode *dir)
+{
+ struct dentry *this_parent;
+ struct dentry *dentry;
+ struct inode *inode;
+
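+
+	/* A directory has at most one alias; clear its verifier, then
+	 * the verifier of each child not covered by its own delegation.
+	 */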
+ if (hlist_empty(&dir->i_dentry))
+ return;
+ this_parent =
+ hlist_entry(dir->i_dentry.first, struct dentry, d_u.d_alias);
+
+ spin_lock(&this_parent->d_lock);
+ nfs_unset_verifier_delegated(&this_parent->d_time);
+ dentry = d_first_child(this_parent);
+ hlist_for_each_entry_from(dentry, d_sib) {
+ if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
+ continue;
+ inode = d_inode_rcu(dentry);
+ if (inode &&
+ NFS_PROTO(inode)->have_delegation(inode, FMODE_READ, 0))
+ continue;
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ nfs_unset_verifier_delegated(&dentry->d_time);
+ spin_unlock(&dentry->d_lock);
+ }
+ spin_unlock(&this_parent->d_lock);
+}
+
/**
* nfs_clear_verifier_delegated - clear the dir verifier delegation tag
* @inode: pointer to inode
@@ -1477,16 +1521,13 @@ EXPORT_SYMBOL_GPL(nfs_set_verifier);
*/
void nfs_clear_verifier_delegated(struct inode *inode)
{
- struct dentry *alias;
-
if (!inode)
return;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
- spin_lock(&alias->d_lock);
- nfs_unset_verifier_delegated(&alias->d_time);
- spin_unlock(&alias->d_lock);
- }
+ if (S_ISREG(inode->i_mode))
+ nfs_clear_verifier_file(inode);
+ else if (S_ISDIR(inode->i_mode))
+ nfs_clear_verifier_directory(inode);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_clear_verifier_delegated);
@@ -1516,14 +1557,6 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
if (!nfs_dentry_verify_change(dir, dentry))
return 0;
- /*
- * If we have a directory delegation then we don't need to revalidate
- * the directory. The delegation will either get recalled or we will
- * receive a notification when it changes.
- */
- if (nfs_have_directory_delegation(dir))
- return 0;
-
/* Revalidate nfsi->cache_change_attribute before we declare a match */
if (nfs_mapping_need_revalidate_inode(dir)) {
if (rcu_walk)
@@ -2217,13 +2250,6 @@ no_open:
EXPORT_SYMBOL_GPL(nfs_atomic_open);
static int
-nfs_lookup_revalidate_delegated_parent(struct inode *dir, struct dentry *dentry,
- struct inode *inode)
-{
- return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
-}
-
-static int
nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name,
struct dentry *dentry, unsigned int flags)
{
@@ -2247,12 +2273,10 @@ nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name,
if (inode == NULL)
goto full_reval;
- if (nfs_verifier_is_delegated(dentry))
+ if (nfs_verifier_is_delegated(dentry) ||
+ nfs_have_directory_delegation(inode))
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
- if (nfs_have_directory_delegation(dir))
- return nfs_lookup_revalidate_delegated_parent(dir, dentry, inode);
-
/* NFS only supports OPEN on regular files */
if (!S_ISREG(inode->i_mode))
goto full_reval;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index d020aab40c64..d1c138a416cf 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -511,7 +511,8 @@ static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL ||
current_is_kswapd() || current_is_kcompactd())
return false;
- if (nfs_wb_folio(folio->mapping->host, folio) < 0)
+ if (nfs_wb_folio_reclaim(folio->mapping->host, folio) < 0 ||
+ folio_test_private(folio))
return false;
}
return nfs_fscache_release_folio(folio, gfp);
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index c55ea8fa3bfa..c2d8a13a9dbd 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -103,7 +103,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
sizeof(struct nfs4_ff_ds_version),
gfp_flags);
if (!ds_versions)
- goto out_scratch;
+ goto out_err_drain_dsaddrs;
for (i = 0; i < version_count; i++) {
/* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 84049f3cd340..de2cce1d08f4 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -716,7 +716,7 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
{
struct inode *inode = d_inode(dentry);
struct nfs_fattr *fattr;
- loff_t oldsize = i_size_read(inode);
+ loff_t oldsize;
int error = 0;
kuid_t task_uid = current_fsuid();
kuid_t owner_uid = inode->i_uid;
@@ -727,6 +727,10 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
attr->ia_valid &= ~ATTR_MODE;
+ if (S_ISREG(inode->i_mode))
+ nfs_file_block_o_direct(NFS_I(inode));
+
+ oldsize = i_size_read(inode);
if (attr->ia_valid & ATTR_SIZE) {
BUG_ON(!S_ISREG(inode->i_mode));
@@ -774,10 +778,8 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
trace_nfs_setattr_enter(inode);
/* Write all dirty data */
- if (S_ISREG(inode->i_mode)) {
- nfs_file_block_o_direct(NFS_I(inode));
+ if (S_ISREG(inode->i_mode))
nfs_sync_inode(inode);
- }
fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
if (fattr == NULL) {
diff --git a/fs/nfs/io.c b/fs/nfs/io.c
index d275b0a250bf..8337f0ae852d 100644
--- a/fs/nfs/io.c
+++ b/fs/nfs/io.c
@@ -84,6 +84,7 @@ nfs_start_io_write(struct inode *inode)
nfs_file_block_o_direct(NFS_I(inode));
return err;
}
+EXPORT_SYMBOL_GPL(nfs_start_io_write);
/**
* nfs_end_io_write - declare that the buffered write operation is done
@@ -97,6 +98,7 @@ nfs_end_io_write(struct inode *inode)
{
up_write(&inode->i_rwsem);
}
+EXPORT_SYMBOL_GPL(nfs_end_io_write);
/* Call with exclusively locked inode->i_rwsem */
static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
index a113bfdacfd6..41fbcb3f9167 100644
--- a/fs/nfs/localio.c
+++ b/fs/nfs/localio.c
@@ -461,6 +461,8 @@ nfs_local_iters_init(struct nfs_local_kiocb *iocb, int rw)
v = 0;
total = hdr->args.count;
base = hdr->args.pgbase;
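+	/* pgbase may span whole pages: advance to the first page actually
+	 * used and keep only the in-page offset.
+	 */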
+ pagevec += base >> PAGE_SHIFT;
+ base &= ~PAGE_MASK;
while (total && v < hdr->page_array.npages) {
len = min_t(size_t, total, PAGE_SIZE - base);
bvec_set_page(&iocb->bvec[v], *pagevec, len, base);
@@ -618,7 +620,6 @@ static void nfs_local_call_read(struct work_struct *work)
struct nfs_local_kiocb *iocb =
container_of(work, struct nfs_local_kiocb, work);
struct file *filp = iocb->kiocb.ki_filp;
- bool force_done = false;
ssize_t status;
int n_iters;
@@ -637,13 +638,13 @@ static void nfs_local_call_read(struct work_struct *work)
scoped_with_creds(filp->f_cred)
status = filp->f_op->read_iter(&iocb->kiocb, &iocb->iters[i]);
- if (status != -EIOCBQUEUED) {
- if (unlikely(status >= 0 && status < iocb->iters[i].count))
- force_done = true; /* Partial read */
- if (nfs_local_pgio_done(iocb, status, force_done)) {
- nfs_local_read_iocb_done(iocb);
- break;
- }
+ if (status == -EIOCBQUEUED)
+ continue;
+ /* Break on completion, errors, or short reads */
+ if (nfs_local_pgio_done(iocb, status, false) || status < 0 ||
+ (size_t)status < iov_iter_count(&iocb->iters[i])) {
+ nfs_local_read_iocb_done(iocb);
+ break;
}
}
}
@@ -821,7 +822,6 @@ static void nfs_local_call_write(struct work_struct *work)
container_of(work, struct nfs_local_kiocb, work);
struct file *filp = iocb->kiocb.ki_filp;
unsigned long old_flags = current->flags;
- bool force_done = false;
ssize_t status;
int n_iters;
@@ -843,13 +843,13 @@ static void nfs_local_call_write(struct work_struct *work)
scoped_with_creds(filp->f_cred)
status = filp->f_op->write_iter(&iocb->kiocb, &iocb->iters[i]);
- if (status != -EIOCBQUEUED) {
- if (unlikely(status >= 0 && status < iocb->iters[i].count))
- force_done = true; /* Partial write */
- if (nfs_local_pgio_done(iocb, status, force_done)) {
- nfs_local_write_iocb_done(iocb);
- break;
- }
+ if (status == -EIOCBQUEUED)
+ continue;
+ /* Break on completion, errors, or short writes */
+ if (nfs_local_pgio_done(iocb, status, false) || status < 0 ||
+ (size_t)status < iov_iter_count(&iocb->iters[i])) {
+ nfs_local_write_iocb_done(iocb);
+ break;
}
}
file_end_write(filp);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index d537fb0c230e..c08520828708 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -114,7 +114,6 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
exception.inode = inode;
exception.state = lock->open_context->state;
- nfs_file_block_o_direct(NFS_I(inode));
err = nfs_sync_inode(inode);
if (err)
goto out;
@@ -138,13 +137,17 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
};
struct inode *inode = file_inode(filep);
- loff_t oldsize = i_size_read(inode);
+ loff_t oldsize;
int err;
if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
return -EOPNOTSUPP;
- inode_lock(inode);
+ err = nfs_start_io_write(inode);
+ if (err)
+ return err;
+
+ oldsize = i_size_read(inode);
err = nfs42_proc_fallocate(&msg, filep, offset, len);
@@ -155,7 +158,7 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
NFS_SERVER(inode)->caps &= ~(NFS_CAP_ALLOCATE |
NFS_CAP_ZERO_RANGE);
- inode_unlock(inode);
+ nfs_end_io_write(inode);
return err;
}
@@ -170,7 +173,9 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
return -EOPNOTSUPP;
- inode_lock(inode);
+ err = nfs_start_io_write(inode);
+ if (err)
+ return err;
err = nfs42_proc_fallocate(&msg, filep, offset, len);
if (err == 0)
@@ -179,7 +184,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
NFS_SERVER(inode)->caps &= ~(NFS_CAP_DEALLOCATE |
NFS_CAP_ZERO_RANGE);
- inode_unlock(inode);
+ nfs_end_io_write(inode);
return err;
}
@@ -189,14 +194,17 @@ int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len)
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ZERO_RANGE],
};
struct inode *inode = file_inode(filep);
- loff_t oldsize = i_size_read(inode);
+ loff_t oldsize;
int err;
if (!nfs_server_capable(inode, NFS_CAP_ZERO_RANGE))
return -EOPNOTSUPP;
- inode_lock(inode);
+ err = nfs_start_io_write(inode);
+ if (err)
+ return err;
+ oldsize = i_size_read(inode);
err = nfs42_proc_fallocate(&msg, filep, offset, len);
if (err == 0) {
nfs_truncate_last_folio(inode->i_mapping, oldsize,
@@ -205,7 +213,7 @@ int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len)
} else if (err == -EOPNOTSUPP)
NFS_SERVER(inode)->caps &= ~NFS_CAP_ZERO_RANGE;
- inode_unlock(inode);
+ nfs_end_io_write(inode);
return err;
}
@@ -416,7 +424,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
struct nfs_server *src_server = NFS_SERVER(src_inode);
loff_t pos_src = args->src_pos;
loff_t pos_dst = args->dst_pos;
- loff_t oldsize_dst = i_size_read(dst_inode);
+ loff_t oldsize_dst;
size_t count = args->count;
ssize_t status;
@@ -461,6 +469,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
&src_lock->open_context->state->flags);
set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
&dst_lock->open_context->state->flags);
+ oldsize_dst = i_size_read(dst_inode);
status = nfs4_call_sync(dst_server->client, dst_server, &msg,
&args->seq_args, &res->seq_res, 0);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ec1ce593dea2..a0885ae55abc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3894,8 +3894,8 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
- calldata->lr.roc = pnfs_roc(state->inode,
- &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
+ calldata->lr.roc = pnfs_roc(state->inode, &calldata->lr.arg,
+ &calldata->lr.res, msg.rpc_cred, wait);
if (calldata->lr.roc) {
calldata->arg.lr_args = &calldata->lr.arg;
calldata->res.lr_res = &calldata->lr.res;
@@ -4494,6 +4494,25 @@ static bool should_request_dir_deleg(struct inode *inode)
}
#endif /* CONFIG_NFS_V4_1 */
+static void nfs4_call_getattr_prepare(struct rpc_task *task, void *calldata)
+{
+	struct nfs4_call_sync_data *data = calldata;
+
+ nfs4_setup_sequence(data->seq_server->nfs_client, data->seq_args,
+ data->seq_res, task);
+}
+
+static void nfs4_call_getattr_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_call_sync_data *data = calldata;
+
+ nfs4_sequence_process(task, data->seq_res);
+}
+
+static const struct rpc_call_ops nfs4_call_getattr_ops = {
+ .rpc_call_prepare = nfs4_call_getattr_prepare,
+ .rpc_call_done = nfs4_call_getattr_done,
+};
+
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fattr *fattr, struct inode *inode)
{
@@ -4511,16 +4530,26 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_argp = &args,
.rpc_resp = &res,
};
+ struct nfs4_call_sync_data data = {
+ .seq_server = server,
+ .seq_args = &args.seq_args,
+ .seq_res = &res.seq_res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_call_getattr_ops,
+ .callback_data = &data,
+ };
struct nfs4_gdd_res gdd_res;
- unsigned short task_flags = 0;
int status;
if (nfs4_has_session(server->nfs_client))
- task_flags = RPC_TASK_MOVEABLE;
+ task_setup.flags = RPC_TASK_MOVEABLE;
/* Is this is an attribute revalidation, subject to softreval? */
if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
- task_flags |= RPC_TASK_TIMEOUT;
+ task_setup.flags |= RPC_TASK_TIMEOUT;
args.get_dir_deleg = should_request_dir_deleg(inode);
if (args.get_dir_deleg)
@@ -4530,22 +4559,24 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
nfs_fattr_init(fattr);
nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
- status = nfs4_do_call_sync(server->client, server, &msg,
- &args.seq_args, &res.seq_res, task_flags);
+ status = nfs4_call_sync_custom(&task_setup);
+
if (args.get_dir_deleg) {
switch (status) {
case 0:
if (gdd_res.status != GDD4_OK)
break;
- status = nfs_inode_set_delegation(
- inode, current_cred(), FMODE_READ,
- &gdd_res.deleg, 0, NFS4_OPEN_DELEGATE_READ);
+ nfs_inode_set_delegation(inode, current_cred(),
+ FMODE_READ, &gdd_res.deleg, 0,
+ NFS4_OPEN_DELEGATE_READ);
break;
case -ENOTSUPP:
case -EOPNOTSUPP:
server->caps &= ~NFS_CAP_DIR_DELEG;
}
}
+
+ nfs4_sequence_free_slot(&res.seq_res);
return status;
}
@@ -7005,7 +7036,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
data->inode = nfs_igrab_and_active(inode);
if (data->inode || issync) {
data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
- cred);
+ cred, issync);
if (data->lr.roc) {
data->args.lr_args = &data->lr.arg;
data->res.lr_res = &data->lr.res;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 01179f7de322..dba51c622cf3 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1445,6 +1445,8 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
struct nfs4_state *state;
bool found = false;
+ if (!S_ISREG(inode->i_mode))
+ goto out;
rcu_read_lock();
list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
state = ctx->state;
@@ -1466,7 +1468,7 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
found = true;
}
rcu_read_unlock();
-
+out:
nfs_inode_find_delegation_state_and_recover(inode, stateid);
if (found)
nfs4_schedule_state_manager(clp);
@@ -1478,6 +1480,8 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state, int err)
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_open_context *ctx;
+ if (!S_ISREG(inode->i_mode))
+ return;
rcu_read_lock();
list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
if (ctx->state != state)
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 6ce55e8e6b67..9f9ce4a565ea 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -1062,6 +1062,9 @@ DECLARE_EVENT_CLASS(nfs_folio_event_done,
DEFINE_NFS_FOLIO_EVENT(nfs_aop_readpage);
DEFINE_NFS_FOLIO_EVENT_DONE(nfs_aop_readpage_done);
+DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio_reclaim);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_reclaim_done);
+
DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio);
DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_done);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index b72d7cc36766..cff225721d1c 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1533,10 +1533,9 @@ static int pnfs_layout_return_on_reboot(struct pnfs_layout_hdr *lo)
PNFS_FL_LAYOUTRETURN_PRIVILEGED);
}
-bool pnfs_roc(struct inode *ino,
- struct nfs4_layoutreturn_args *args,
- struct nfs4_layoutreturn_res *res,
- const struct cred *cred)
+bool pnfs_roc(struct inode *ino, struct nfs4_layoutreturn_args *args,
+ struct nfs4_layoutreturn_res *res, const struct cred *cred,
+ bool sync)
{
struct nfs_inode *nfsi = NFS_I(ino);
struct nfs_open_context *ctx;
@@ -1547,7 +1546,7 @@ bool pnfs_roc(struct inode *ino,
nfs4_stateid stateid;
enum pnfs_iomode iomode = 0;
bool layoutreturn = false, roc = false;
- bool skip_read = false;
+ bool skip_read;
if (!nfs_have_layout(ino))
return false;
@@ -1560,20 +1559,14 @@ retry:
lo = NULL;
goto out_noroc;
}
- pnfs_get_layout_hdr(lo);
- if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
- spin_unlock(&ino->i_lock);
- rcu_read_unlock();
- wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
- TASK_UNINTERRUPTIBLE);
- pnfs_put_layout_hdr(lo);
- goto retry;
- }
/* no roc if we hold a delegation */
+ skip_read = false;
if (nfs4_check_delegation(ino, FMODE_READ)) {
- if (nfs4_check_delegation(ino, FMODE_WRITE))
+ if (nfs4_check_delegation(ino, FMODE_WRITE)) {
+ lo = NULL;
goto out_noroc;
+ }
skip_read = true;
}
@@ -1582,12 +1575,43 @@ retry:
if (state == NULL)
continue;
/* Don't return layout if there is open file state */
- if (state->state & FMODE_WRITE)
+ if (state->state & FMODE_WRITE) {
+ lo = NULL;
goto out_noroc;
+ }
if (state->state & FMODE_READ)
skip_read = true;
}
+ if (skip_read) {
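+		/* Reads are being kept: if only IOMODE_READ segments
+		 * remain, there is nothing to return on close.
+		 */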
+ bool writes = false;
+
+ list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
+ if (lseg->pls_range.iomode != IOMODE_READ) {
+ writes = true;
+ break;
+ }
+ }
+ if (!writes) {
+ lo = NULL;
+ goto out_noroc;
+ }
+ }
+
+ pnfs_get_layout_hdr(lo);
+ if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
+ if (!sync) {
+ pnfs_set_plh_return_info(
+ lo, skip_read ? IOMODE_RW : IOMODE_ANY, 0);
+ goto out_noroc;
+ }
+ spin_unlock(&ino->i_lock);
+ rcu_read_unlock();
+ wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
+ TASK_UNINTERRUPTIBLE);
+ pnfs_put_layout_hdr(lo);
+ goto retry;
+ }
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
@@ -1627,7 +1651,7 @@ retry:
out_noroc:
spin_unlock(&ino->i_lock);
rcu_read_unlock();
- pnfs_layoutcommit_inode(ino, true);
+ pnfs_layoutcommit_inode(ino, sync);
if (roc) {
struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
if (ld->prepare_layoutreturn)
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 91ff877185c8..3db8f13d8fe4 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -303,10 +303,9 @@ int pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
u32 seq);
int pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
struct list_head *lseg_list);
-bool pnfs_roc(struct inode *ino,
- struct nfs4_layoutreturn_args *args,
- struct nfs4_layoutreturn_res *res,
- const struct cred *cred);
+bool pnfs_roc(struct inode *ino, struct nfs4_layoutreturn_args *args,
+ struct nfs4_layoutreturn_res *res, const struct cred *cred,
+ bool sync);
int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
struct nfs4_layoutreturn_res **respp, int *ret);
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
@@ -773,12 +772,10 @@ pnfs_layoutcommit_outstanding(struct inode *inode)
return false;
}
-
-static inline bool
-pnfs_roc(struct inode *ino,
- struct nfs4_layoutreturn_args *args,
- struct nfs4_layoutreturn_res *res,
- const struct cred *cred)
+static inline bool pnfs_roc(struct inode *ino,
+ struct nfs4_layoutreturn_args *args,
+ struct nfs4_layoutreturn_res *res,
+ const struct cred *cred, bool sync)
{
return false;
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 336c510f3750..bf412455e8ed 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -2025,6 +2025,39 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
}
/**
+ * nfs_wb_folio_reclaim - Write back all requests on one folio
+ * @inode: pointer to inode
+ * @folio: pointer to folio
+ *
+ * Assumes that the folio has been locked by the caller
+ */
+int nfs_wb_folio_reclaim(struct inode *inode, struct folio *folio)
+{
+ loff_t range_start = folio_pos(folio);
+ size_t len = folio_size(folio);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0,
+ .range_start = range_start,
+ .range_end = range_start + len - 1,
+ .for_sync = 1,
+ };
+ int ret;
+
+ if (folio_test_writeback(folio))
+ return -EBUSY;
+ if (folio_clear_dirty_for_io(folio)) {
+ trace_nfs_writeback_folio_reclaim(inode, range_start, len);
+ ret = nfs_writepage_locked(folio, &wbc);
+ trace_nfs_writeback_folio_reclaim_done(inode, range_start, len,
+ ret);
+ return ret;
+ }
+ nfs_commit_inode(inode, 0);
+ return 0;
+}
+
+/**
* nfs_wb_folio - Write back all requests on one page
 * @inode: pointer to inode
* @folio: pointer to folio
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index d97295eaebe6..c19d6d713780 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -848,15 +848,16 @@ sparse_alloc:
* invalid inode records, such as records that start at agbno 0
* or extend beyond the AG.
*
- * Set min agbno to the first aligned, non-zero agbno and max to
- * the last aligned agbno that is at least one full chunk from
- * the end of the AG.
+	 * Set min agbno to the first chunk-aligned, non-zero agbno and
+	 * max to one less than the last chunk-aligned agbno from the
+ * end of the AG. We subtract 1 from max so that the cluster
+ * allocation alignment takes over and allows allocation within
+ * the last full inode chunk in the AG.
*/
args.min_agbno = args.mp->m_sb.sb_inoalignmt;
args.max_agbno = round_down(xfs_ag_block_count(args.mp,
pag_agno(pag)),
- args.mp->m_sb.sb_inoalignmt) -
- igeo->ialloc_blks;
+ args.mp->m_sb.sb_inoalignmt) - 1;
error = xfs_alloc_vextent_near_bno(&args,
xfs_agbno_to_fsb(pag,
diff --git a/fs/xfs/libxfs/xfs_rtgroup.c b/fs/xfs/libxfs/xfs_rtgroup.c
index 9186c58e83d5..be16efaa6925 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.c
+++ b/fs/xfs/libxfs/xfs_rtgroup.c
@@ -48,6 +48,31 @@ xfs_rtgroup_min_block(
return 0;
}
+/* Compute the number of rt extents in this realtime group. */
+static xfs_rtxnum_t
+__xfs_rtgroup_extents(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgno,
+ xfs_rgnumber_t rgcount,
+ xfs_rtbxlen_t rextents)
+{
+ ASSERT(rgno < rgcount);
+ if (rgno == rgcount - 1)
+ return rextents - ((xfs_rtxnum_t)rgno * mp->m_sb.sb_rgextents);
+
+ ASSERT(xfs_has_rtgroups(mp));
+ return mp->m_sb.sb_rgextents;
+}
+
+xfs_rtxnum_t
+xfs_rtgroup_extents(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgno)
+{
+ return __xfs_rtgroup_extents(mp, rgno, mp->m_sb.sb_rgcount,
+ mp->m_sb.sb_rextents);
+}
+
/* Precompute this group's geometry */
void
xfs_rtgroup_calc_geometry(
@@ -58,7 +83,8 @@ xfs_rtgroup_calc_geometry(
xfs_rtbxlen_t rextents)
{
rtg->rtg_extents = __xfs_rtgroup_extents(mp, rgno, rgcount, rextents);
- rtg_group(rtg)->xg_block_count = rtg->rtg_extents * mp->m_sb.sb_rextsize;
+ rtg_group(rtg)->xg_block_count =
+ rtg->rtg_extents * mp->m_sb.sb_rextsize;
rtg_group(rtg)->xg_min_gbno = xfs_rtgroup_min_block(mp, rgno);
}
@@ -136,31 +162,6 @@ out_unwind_new_rtgs:
return error;
}
-/* Compute the number of rt extents in this realtime group. */
-xfs_rtxnum_t
-__xfs_rtgroup_extents(
- struct xfs_mount *mp,
- xfs_rgnumber_t rgno,
- xfs_rgnumber_t rgcount,
- xfs_rtbxlen_t rextents)
-{
- ASSERT(rgno < rgcount);
- if (rgno == rgcount - 1)
- return rextents - ((xfs_rtxnum_t)rgno * mp->m_sb.sb_rgextents);
-
- ASSERT(xfs_has_rtgroups(mp));
- return mp->m_sb.sb_rgextents;
-}
-
-xfs_rtxnum_t
-xfs_rtgroup_extents(
- struct xfs_mount *mp,
- xfs_rgnumber_t rgno)
-{
- return __xfs_rtgroup_extents(mp, rgno, mp->m_sb.sb_rgcount,
- mp->m_sb.sb_rextents);
-}
-
/*
* Update the rt extent count of the previous tail rtgroup if it changed during
* recovery (i.e. recovery of a growfs).
diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h
index 03f1e2493334..73cace4d25c7 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.h
+++ b/fs/xfs/libxfs/xfs_rtgroup.h
@@ -285,8 +285,6 @@ void xfs_free_rtgroups(struct xfs_mount *mp, xfs_rgnumber_t first_rgno,
int xfs_initialize_rtgroups(struct xfs_mount *mp, xfs_rgnumber_t first_rgno,
xfs_rgnumber_t end_rgno, xfs_rtbxlen_t rextents);
-xfs_rtxnum_t __xfs_rtgroup_extents(struct xfs_mount *mp, xfs_rgnumber_t rgno,
- xfs_rgnumber_t rgcount, xfs_rtbxlen_t rextents);
xfs_rtxnum_t xfs_rtgroup_extents(struct xfs_mount *mp, xfs_rgnumber_t rgno);
void xfs_rtgroup_calc_geometry(struct xfs_mount *mp, struct xfs_rtgroup *rtg,
xfs_rgnumber_t rgno, xfs_rgnumber_t rgcount,
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a311385b23d8..d4544ccafea5 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1180,9 +1180,11 @@ xfs_log_cover(
int error = 0;
bool need_covered;
- ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
- !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
- xlog_is_shutdown(mp->m_log));
+ if (!xlog_is_shutdown(mp->m_log)) {
+ ASSERT(xlog_cil_empty(mp->m_log));
+ ASSERT(xlog_iclogs_empty(mp->m_log));
+ ASSERT(!xfs_ail_min_lsn(mp->m_log->l_ailp));
+ }
if (!xfs_log_writable(mp))
return 0;
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index e063f4f2f2e6..a12ffed12391 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -126,7 +126,7 @@ xfs_rtcopy_summary(
error = 0;
out:
xfs_rtbuf_cache_relse(oargs);
- return 0;
+ return error;
}
/*
* Mark an extent specified by start and len allocated.
@@ -1265,7 +1265,7 @@ xfs_growfs_check_rtgeom(
uint32_t rem;
if (rextsize != 1)
- return -EINVAL;
+ goto out_inval;
div_u64_rem(nmp->m_sb.sb_rblocks, gblocks, &rem);
if (rem) {
xfs_warn(mp,
@@ -1326,7 +1326,7 @@ xfs_grow_last_rtg(
return true;
if (mp->m_sb.sb_rgcount == 0)
return false;
- return xfs_rtgroup_extents(mp, mp->m_sb.sb_rgcount - 1) <=
+ return xfs_rtgroup_extents(mp, mp->m_sb.sb_rgcount - 1) <
mp->m_sb.sb_rgextents;
}
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
index 04b18d0e37af..30fbbde81c5c 100644
--- a/include/hyperv/hvgdk_mini.h
+++ b/include/hyperv/hvgdk_mini.h
@@ -578,9 +578,12 @@ struct hv_tlb_flush { /* HV_INPUT_FLUSH_VIRTUAL_ADDRESS_LIST */
struct hv_tlb_flush_ex {
u64 address_space;
u64 flags;
- struct hv_vpset hv_vp_set;
- u64 gva_list[];
+ __TRAILING_OVERLAP(struct hv_vpset, hv_vp_set, bank_contents, __packed,
+ u64 gva_list[];
+ );
} __packed;
+static_assert(offsetof(struct hv_tlb_flush_ex, hv_vp_set.bank_contents) ==
+ offsetof(struct hv_tlb_flush_ex, gva_list));
struct ms_hyperv_tsc_page { /* HV_REFERENCE_TSC_PAGE */
volatile u32 tsc_sequence;
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
index 8afa92d15a66..1e99fda2b380 100644
--- a/include/linux/can/can-ml.h
+++ b/include/linux/can/can-ml.h
@@ -46,6 +46,12 @@
#include <linux/list.h>
#include <linux/netdevice.h>
+/* exposed CAN device capabilities for network layer */
+#define CAN_CAP_CC BIT(0) /* CAN CC aka Classical CAN */
+#define CAN_CAP_FD BIT(1) /* CAN FD */
+#define CAN_CAP_XL BIT(2) /* CAN XL */
+#define CAN_CAP_RO BIT(3) /* read-only mode (LISTEN/RESTRICTED) */
+
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
#define CAN_EFF_RCV_HASH_BITS 10
#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
@@ -64,6 +70,7 @@ struct can_ml_priv {
#ifdef CAN_J1939
struct j1939_priv *j1939_priv;
#endif
+ u32 can_cap;
};
static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
@@ -77,4 +84,21 @@ static inline void can_set_ml_priv(struct net_device *dev,
netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
}
+static inline bool can_cap_enabled(struct net_device *dev, u32 cap)
+{
+ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
+
+ if (!can_ml)
+ return false;
+
+ return (can_ml->can_cap & cap);
+}
+
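+/* Unlike can_cap_enabled(), this assumes dev has a valid CAN ml_priv. */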
+static inline void can_set_cap(struct net_device *dev, u32 cap)
+{
+ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
+
+ can_ml->can_cap = cap;
+}
+
#endif /* CAN_ML_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index f6416a56e95d..6d0710d6f571 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -111,18 +111,12 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
void free_candev(struct net_device *dev);
/* a candev safe wrapper around netdev_priv */
-#if IS_ENABLED(CONFIG_CAN_NETLINK)
struct can_priv *safe_candev_priv(struct net_device *dev);
-#else
-static inline struct can_priv *safe_candev_priv(struct net_device *dev)
-{
- return NULL;
-}
-#endif
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
void can_set_default_mtu(struct net_device *dev);
+void can_set_cap_info(struct net_device *dev);
int __must_check can_set_static_ctrlmode(struct net_device *dev,
u32 static_mode);
int can_hwtstamp_get(struct net_device *netdev,
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index b760a3c470a5..f7cc60de0058 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -626,7 +626,13 @@ struct cgroup {
#endif
/* All ancestors including self */
- struct cgroup *ancestors[];
+ union {
+ DECLARE_FLEX_ARRAY(struct cgroup *, ancestors);
+ struct {
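+			/* Always-present slot so the root cgroup can be
+			 * embedded at the end of cgroup_root; deeper
+			 * cgroups allocate the rest via _low_ancestors.
+			 */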
+ struct cgroup *_root_ancestor;
+ DECLARE_FLEX_ARRAY(struct cgroup *, _low_ancestors);
+ };
+ };
};
/*
@@ -647,16 +653,6 @@ struct cgroup_root {
struct list_head root_list;
struct rcu_head rcu; /* Must be near the top */
- /*
- * The root cgroup. The containing cgroup_root will be destroyed on its
- * release. cgrp->ancestors[0] will be used overflowing into the
- * following field. cgrp_ancestor_storage must immediately follow.
- */
- struct cgroup cgrp;
-
- /* must follow cgrp for cgrp->ancestors[0], see above */
- struct cgroup *cgrp_ancestor_storage;
-
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
@@ -668,6 +664,13 @@ struct cgroup_root {
/* The name for this hierarchy - may be empty */
char name[MAX_CGROUP_ROOT_NAMELEN];
+
+ /*
+ * The root cgroup. The containing cgroup_root will be destroyed on its
+ * release. This must be embedded last due to flexible array at the end
+ * of struct cgroup.
+ */
+ struct cgroup cgrp;
};
/*
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 0ad1ddbb8b99..e5822f6e7f27 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -211,6 +211,7 @@ struct kmem_obj_info;
* __kfence_obj_info() - fill kmem_obj_info struct
* @kpp: kmem_obj_info to be filled
* @object: the object
+ * @slab: the slab
*
* Return:
* * false - not a KFENCE object
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index a6624edb7226..8dd79a3f3d66 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -637,6 +637,7 @@ extern int nfs_update_folio(struct file *file, struct folio *folio,
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_folio(struct inode *inode, struct folio *folio);
+extern int nfs_wb_folio_reclaim(struct inode *inode, struct folio *folio);
int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_commit_data *nfs_commitdata_alloc(void);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index cf3c6ab408aa..207156f2143c 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -83,6 +83,7 @@ static inline void reset_hung_task_detector(void) { }
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
+extern unsigned long hardlockup_si_mask;
#else
static inline void hardlockup_detector_disable(void) {}
#endif
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 0e1d73955fa5..95d0040df584 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -325,6 +325,7 @@ static inline void might_alloc(gfp_t gfp_mask)
/**
* memalloc_flags_save - Add a PF_* flag to current->flags, save old value
+ * @flags: Flags to add.
*
* This allows PF_* flags to be conveniently added, irrespective of current
* value, and then the old version restored with memalloc_flags_restore().
diff --git a/include/linux/soc/airoha/airoha_offload.h b/include/linux/soc/airoha/airoha_offload.h
index ab64ecdf39a0..d01ef4a6b3d7 100644
--- a/include/linux/soc/airoha/airoha_offload.h
+++ b/include/linux/soc/airoha/airoha_offload.h
@@ -52,8 +52,8 @@ static inline void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
{
}
-static inline int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev,
- void *type_data)
+static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
+ void *type_data)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 6673e4d4ac2e..4933777404d6 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -35,6 +35,7 @@ struct ts_state
* @get_pattern: return head of pattern
* @get_pattern_len: return length of pattern
* @owner: module reference to algorithm
+ * @list: node in the list of registered algorithms
*/
struct ts_ops
{
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 58d91ccc56e0..a7b7abd66e21 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -67,6 +67,7 @@
FN(TC_EGRESS) \
FN(SECURITY_HOOK) \
FN(QDISC_DROP) \
+ FN(QDISC_BURST_DROP) \
FN(QDISC_OVERLIMIT) \
FN(QDISC_CONGESTED) \
FN(CAKE_FLOOD) \
@@ -375,6 +376,11 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_QDISC_DROP,
/**
+ * @SKB_DROP_REASON_QDISC_BURST_DROP: dropped when net.core.qdisc_max_burst
+ * limit is hit.
+ */
+ SKB_DROP_REASON_QDISC_BURST_DROP,
+ /**
* @SKB_DROP_REASON_QDISC_OVERLIMIT: dropped by qdisc when a qdisc
* instance exceeds its total buffer size limit.
*/
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
index 4acec191c54a..6632b1aa7584 100644
--- a/include/net/hotdata.h
+++ b/include/net/hotdata.h
@@ -42,6 +42,7 @@ struct net_hotdata {
int netdev_budget_usecs;
int tstamp_prequeue;
int max_backlog;
+ int qdisc_max_burst;
int dev_tx_weight;
int dev_rx_weight;
int sysctl_max_skb_frags;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index ecae35512b9b..4021e6a73e32 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -19,6 +19,7 @@
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
#include <net/dst_cache.h>
+#include <net/netdev_lock.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -372,7 +373,17 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
fl4->flowi4_flags = flow_flags;
}
-int ip_tunnel_init(struct net_device *dev);
+int __ip_tunnel_init(struct net_device *dev);
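+/* Expanded at each call site so netdev_lockdep_set_classes() creates a
+ * distinct set of static lockdep classes for every tunnel driver.
+ */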
+#define ip_tunnel_init(DEV) \
+({ \
+ struct net_device *__dev = (DEV); \
+ int __res = __ip_tunnel_init(__dev); \
+ \
+ if (!__res) \
+ netdev_lockdep_set_classes(__dev);\
+ __res; \
+})
+
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 1ae08e81339f..15679be90c5c 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -41,6 +41,12 @@ struct scsi_eh_save {
unsigned char cmnd[32];
struct scsi_data_buffer sdb;
struct scatterlist sense_sgl;
+
+ /* struct request fields */
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *rq_crypt_ctx;
+ struct blk_crypto_keyslot *rq_crypt_keyslot;
+#endif
};
extern void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd,
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 58fd6e84f961..a7860c047503 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1402,7 +1402,7 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
#define snd_pcm_lib_mmap_iomem NULL
#endif
-void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime);
+int snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime);
/**
* snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
diff --git a/include/uapi/linux/media/arm/mali-c55-config.h b/include/uapi/linux/media/arm/mali-c55-config.h
index 109082c5694f..3d335f950eeb 100644
--- a/include/uapi/linux/media/arm/mali-c55-config.h
+++ b/include/uapi/linux/media/arm/mali-c55-config.h
@@ -195,15 +195,6 @@ struct mali_c55_stats_buffer {
} __attribute__((packed));
/**
- * enum mali_c55_param_buffer_version - Mali-C55 parameters block versioning
- *
- * @MALI_C55_PARAM_BUFFER_V1: First version of Mali-C55 parameters block
- */
-enum mali_c55_param_buffer_version {
- MALI_C55_PARAM_BUFFER_V1,
-};
-
-/**
* enum mali_c55_param_block_type - Enumeration of Mali-C55 parameter blocks
*
* This enumeration defines the types of Mali-C55 parameters block. Each block
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f0ca69f888fa..3135643d5695 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -9609,6 +9609,11 @@ static int check_reg_const_str(struct bpf_verifier_env *env,
if (reg->type != PTR_TO_MAP_VALUE)
return -EINVAL;
+ if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY) {
+ verbose(env, "R%d points to insn_array map which cannot be used as const string\n", regno);
+ return -EACCES;
+ }
+
if (!bpf_map_is_rdonly(map)) {
verbose(env, "R%d does not point to a readonly map'\n", regno);
return -EACCES;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index e717208cfb18..554a02ee298b 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5847,7 +5847,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
int ret;
/* allocate the cgroup and its ID, 0 is reserved for the root */
- cgrp = kzalloc(struct_size(cgrp, ancestors, (level + 1)), GFP_KERNEL);
+ cgrp = kzalloc(struct_size(cgrp, _low_ancestors, level), GFP_KERNEL);
if (!cgrp)
return ERR_PTR(-ENOMEM);
diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c
index 9dc51fab604f..d4482b6e3cae 100644
--- a/kernel/liveupdate/kexec_handover.c
+++ b/kernel/liveupdate/kexec_handover.c
@@ -460,27 +460,23 @@ static void __init deserialize_bitmap(unsigned int order,
}
}
-/* Return true if memory was deserizlied */
-static bool __init kho_mem_deserialize(const void *fdt)
+/* Returns physical address of the preserved memory map from FDT */
+static phys_addr_t __init kho_get_mem_map_phys(const void *fdt)
{
- struct khoser_mem_chunk *chunk;
const void *mem_ptr;
- u64 mem;
int len;
mem_ptr = fdt_getprop(fdt, 0, PROP_PRESERVED_MEMORY_MAP, &len);
if (!mem_ptr || len != sizeof(u64)) {
pr_err("failed to get preserved memory bitmaps\n");
- return false;
+ return 0;
}
- mem = get_unaligned((const u64 *)mem_ptr);
- chunk = mem ? phys_to_virt(mem) : NULL;
-
- /* No preserved physical pages were passed, no deserialization */
- if (!chunk)
- return false;
+ return get_unaligned((const u64 *)mem_ptr);
+}
+static void __init kho_mem_deserialize(struct khoser_mem_chunk *chunk)
+{
while (chunk) {
unsigned int i;
@@ -489,8 +485,6 @@ static bool __init kho_mem_deserialize(const void *fdt)
&chunk->bitmaps[i]);
chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
}
-
- return true;
}
/*
@@ -1253,6 +1247,7 @@ bool kho_finalized(void)
struct kho_in {
phys_addr_t fdt_phys;
phys_addr_t scratch_phys;
+ phys_addr_t mem_map_phys;
struct kho_debugfs dbg;
};
@@ -1434,12 +1429,10 @@ static void __init kho_release_scratch(void)
void __init kho_memory_init(void)
{
- if (kho_in.scratch_phys) {
+ if (kho_in.mem_map_phys) {
kho_scratch = phys_to_virt(kho_in.scratch_phys);
kho_release_scratch();
-
- if (!kho_mem_deserialize(kho_get_fdt()))
- kho_in.fdt_phys = 0;
+ kho_mem_deserialize(phys_to_virt(kho_in.mem_map_phys));
} else {
kho_reserve_scratch();
}
@@ -1448,8 +1441,9 @@ void __init kho_memory_init(void)
void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
phys_addr_t scratch_phys, u64 scratch_len)
{
- void *fdt = NULL;
struct kho_scratch *scratch = NULL;
+ phys_addr_t mem_map_phys;
+ void *fdt = NULL;
int err = 0;
unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);
@@ -1475,6 +1469,12 @@ void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
goto out;
}
+ mem_map_phys = kho_get_mem_map_phys(fdt);
+ if (!mem_map_phys) {
+ err = -ENOENT;
+ goto out;
+ }
+
scratch = early_memremap(scratch_phys, scratch_len);
if (!scratch) {
pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%lld)\n",
@@ -1515,6 +1515,7 @@ void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
kho_in.fdt_phys = fdt_phys;
kho_in.scratch_phys = scratch_phys;
+ kho_in.mem_map_phys = mem_map_phys;
kho_scratch_cnt = scratch_cnt;
pr_info("found kexec handover data.\n");
diff --git a/kernel/module/kmod.c b/kernel/module/kmod.c
index 25f253812512..a25dccdf7aa7 100644
--- a/kernel/module/kmod.c
+++ b/kernel/module/kmod.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kmod - the kernel module loader
*
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
index 3fa403f9831f..32fc12e53675 100644
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -1557,18 +1557,27 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
ctxt->allow_unsafe_takeover = nbcon_allow_unsafe_takeover();
while (nbcon_seq_read(con) < stop_seq) {
- if (!nbcon_context_try_acquire(ctxt, false))
- return -EPERM;
-
/*
- * nbcon_emit_next_record() returns false when the console was
- * handed over or taken over. In both cases the context is no
- * longer valid.
+ * Atomic flushing does not use console driver synchronization
+ * (i.e. it does not hold the port lock for uart consoles).
+ * Therefore IRQs must be disabled to avoid being interrupted
+ * and then calling into a driver that will deadlock trying
+ * to acquire console ownership.
*/
- if (!nbcon_emit_next_record(&wctxt, true))
- return -EAGAIN;
+ scoped_guard(irqsave) {
+ if (!nbcon_context_try_acquire(ctxt, false))
+ return -EPERM;
- nbcon_context_release(ctxt);
+ /*
+ * nbcon_emit_next_record() returns false when
+ * the console was handed over or taken over.
+ * In both cases the context is no longer valid.
+ */
+ if (!nbcon_emit_next_record(&wctxt, true))
+ return -EAGAIN;
+
+ nbcon_context_release(ctxt);
+ }
if (!ctxt->backlog) {
/* Are there reserved but not yet finalized records? */
@@ -1595,22 +1604,11 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
{
struct console_flush_type ft;
- unsigned long flags;
int err;
again:
- /*
- * Atomic flushing does not use console driver synchronization (i.e.
- * it does not hold the port lock for uart consoles). Therefore IRQs
- * must be disabled to avoid being interrupted and then calling into
- * a driver that will deadlock trying to acquire console ownership.
- */
- local_irq_save(flags);
-
err = __nbcon_atomic_flush_pending_con(con, stop_seq);
- local_irq_restore(flags);
-
/*
* If there was a new owner (-EPERM, -EAGAIN), that context is
* responsible for completing.
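
The hunk above moves IRQ disabling into a scoped_guard(irqsave) around each acquire/emit/release iteration, so the early returns for -EPERM and -EAGAIN cannot leak disabled IRQs. Below is a hedged userspace sketch of the same scope-based pattern using __attribute__((cleanup)) and stubbed IRQ functions; the kernel's guard comes from <linux/cleanup.h> and this is only an analogue.

/* Sketch: the cleanup attribute runs irq_guard_exit() on every scope exit,
 * including early returns, so the "restore" can never be skipped. The
 * fake_irq_* functions are printf stubs, not real IRQ control.
 */
#include <stdio.h>

static void fake_irq_save(unsigned long *flags) { *flags = 0xACE; printf("irqs disabled\n"); }
static void fake_irq_restore(unsigned long flags) { printf("irqs restored (%#lx)\n", flags); }

struct irq_guard { unsigned long flags; };

static void irq_guard_exit(struct irq_guard *g)
{
	fake_irq_restore(g->flags);
}

static int try_emit(int succeed)
{
	{
		struct irq_guard g __attribute__((cleanup(irq_guard_exit))) = { 0 };

		fake_irq_save(&g.flags);
		if (!succeed)
			return -1;	/* cleanup still runs: no missed restore */
		printf("record emitted under disabled irqs\n");
	}
	return 0;
}

int main(void)
{
	try_emit(1);
	try_emit(0);
	return 0;
}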
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ef2d5dca6f70..aa758efc3731 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1148,7 +1148,6 @@ struct ftrace_page {
};
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
-#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;
@@ -3834,7 +3833,8 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
return 0;
}
-static int ftrace_allocate_records(struct ftrace_page *pg, int count)
+static int ftrace_allocate_records(struct ftrace_page *pg, int count,
+ unsigned long *num_pages)
{
int order;
int pages;
@@ -3844,7 +3844,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
return -EINVAL;
/* We want to fill as much as possible, with no empty pages */
- pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
+ pages = DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE);
order = fls(pages) - 1;
again:
@@ -3859,6 +3859,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
}
ftrace_number_of_pages += 1 << order;
+ *num_pages += 1 << order;
ftrace_number_of_groups++;
cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
@@ -3887,12 +3888,14 @@ static void ftrace_free_pages(struct ftrace_page *pages)
}
static struct ftrace_page *
-ftrace_allocate_pages(unsigned long num_to_init)
+ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages)
{
struct ftrace_page *start_pg;
struct ftrace_page *pg;
int cnt;
+ *num_pages = 0;
+
if (!num_to_init)
return NULL;
@@ -3906,7 +3909,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
* waste as little space as possible.
*/
for (;;) {
- cnt = ftrace_allocate_records(pg, num_to_init);
+ cnt = ftrace_allocate_records(pg, num_to_init, num_pages);
if (cnt < 0)
goto free_pages;
@@ -7192,8 +7195,6 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
- pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
-
/*
* Sorting mcount in vmlinux at build time depend on
* CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
@@ -7206,7 +7207,7 @@ static int ftrace_process_locs(struct module *mod,
test_is_sorted(start, count);
}
- start_pg = ftrace_allocate_pages(count);
+ start_pg = ftrace_allocate_pages(count, &pages);
if (!start_pg)
return -ENOMEM;
@@ -7305,27 +7306,27 @@ static int ftrace_process_locs(struct module *mod,
/* We should have used all pages unless we skipped some */
if (pg_unuse) {
unsigned long pg_remaining, remaining = 0;
- unsigned long skip;
+ long skip;
/* Count the number of entries unused and compare it to skipped. */
- pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index;
+ pg_remaining = (PAGE_SIZE << pg->order) / ENTRY_SIZE - pg->index;
if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) {
skip = skipped - pg_remaining;
- for (pg = pg_unuse; pg; pg = pg->next)
+ for (pg = pg_unuse; pg && skip > 0; pg = pg->next) {
remaining += 1 << pg->order;
+ skip -= (PAGE_SIZE << pg->order) / ENTRY_SIZE;
+ }
pages -= remaining;
- skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE);
-
/*
* Check to see if the number of pages remaining would
* just fit the number of entries skipped.
*/
- WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped",
+ WARN(pg || skip > 0, "Extra allocated pages for ftrace: %lu with %lu skipped",
remaining, skipped);
}
/* Need to synchronize with ftrace_location_range() */
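
The allocation math above now works in bytes: the page estimate is DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE), the order is fls(pages) - 1, and each allocation holds (PAGE_SIZE << order) / ENTRY_SIZE records. A worked example with assumed sizes (4 KiB pages and a hypothetical 40-byte record; the real sizes are architecture- and config-dependent):

/* Worked example of the ftrace page-sizing arithmetic under assumed sizes. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ENTRY_SIZE	40UL	/* hypothetical sizeof(struct dyn_ftrace) */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int fls_ul(unsigned long x)	/* mock of the kernel's fls() */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned long count = 5000;	/* number of mcount records */
	unsigned long pages = DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE);
	int order = fls_ul(pages) - 1;
	unsigned long cnt = (PAGE_SIZE << order) / ENTRY_SIZE;

	/* 5000 * 40 = 200000 bytes -> 49 pages -> order 5 (32 pages) first try */
	printf("pages=%lu order=%d entries/alloc=%lu\n", pages, order, cnt);
	return 0;
}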
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 0685e3a8aa0a..366122f4a0f8 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -71,7 +71,7 @@ unsigned int __read_mostly hardlockup_panic =
* hard lockup is detected, it could be task, memory, lock etc.
* Refer include/linux/sys_info.h for detailed bit definition.
*/
-static unsigned long hardlockup_si_mask;
+unsigned long hardlockup_si_mask;
#ifdef CONFIG_SYSFS
diff --git a/lib/buildid.c b/lib/buildid.c
index aaf61dfc0919..818331051afe 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -5,6 +5,7 @@
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
+#include <linux/fs.h>
#include <linux/secretmem.h>
#define BUILD_ID 3
@@ -46,20 +47,9 @@ static int freader_get_folio(struct freader *r, loff_t file_off)
freader_put_folio(r);
- /* reject secretmem folios created with memfd_secret() */
- if (secretmem_mapping(r->file->f_mapping))
- return -EFAULT;
-
+ /* only use page cache lookup - fail if not already cached */
r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);
- /* if sleeping is allowed, wait for the page, if necessary */
- if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) {
- filemap_invalidate_lock_shared(r->file->f_mapping);
- r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT,
- NULL, r->file);
- filemap_invalidate_unlock_shared(r->file->f_mapping);
- }
-
if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) {
if (!IS_ERR(r->folio))
folio_put(r->folio);
@@ -97,6 +87,24 @@ const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz)
return r->data + file_off;
}
+ /* reject secretmem folios created with memfd_secret() */
+ if (secretmem_mapping(r->file->f_mapping)) {
+ r->err = -EFAULT;
+ return NULL;
+ }
+
+ /* use __kernel_read() for sleepable context */
+ if (r->may_fault) {
+ ssize_t ret;
+
+ ret = __kernel_read(r->file, r->buf, sz, &file_off);
+ if (ret != sz) {
+ r->err = (ret < 0) ? ret : -EIO;
+ return NULL;
+ }
+ return r->buf;
+ }
+
/* fetch or reuse folio for given file offset */
r->err = freader_get_folio(r, file_off);
if (r->err)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index f9fc0375890a..84f80a20f233 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1431,6 +1431,35 @@ bool damon_is_running(struct damon_ctx *ctx)
return running;
}
+/*
+ * damon_call_handle_inactive_ctx() - handle a DAMON call request that was
+ * added to an inactive context.
+ * @ctx: The inactive DAMON context.
+ * @control: Control variable of the call request.
+ *
+ * This function is called in the case where @control was added to @ctx but
+ * @ctx is not running (inactive). Check whether @ctx handled @control, and
+ * clean up @control if it was not handled.
+ *
+ * Returns 0 if @control was handled by @ctx, negative error code otherwise.
+ */
+static int damon_call_handle_inactive_ctx(
+ struct damon_ctx *ctx, struct damon_call_control *control)
+{
+ struct damon_call_control *c;
+
+ mutex_lock(&ctx->call_controls_lock);
+ list_for_each_entry(c, &ctx->call_controls, list) {
+ if (c == control) {
+ list_del(&control->list);
+ mutex_unlock(&ctx->call_controls_lock);
+ return -EINVAL;
+ }
+ }
+ mutex_unlock(&ctx->call_controls_lock);
+ return 0;
+}
+
/**
* damon_call() - Invoke a given function on DAMON worker thread (kdamond).
* @ctx: DAMON context to call the function for.
@@ -1461,7 +1490,7 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
list_add_tail(&control->list, &ctx->call_controls);
mutex_unlock(&ctx->call_controls_lock);
if (!damon_is_running(ctx))
- return -EINVAL;
+ return damon_call_handle_inactive_ctx(ctx, control);
if (control->repeat)
return 0;
wait_for_completion(&control->completion);
@@ -2051,13 +2080,15 @@ static unsigned long damos_get_node_memcg_used_bp(
rcu_read_lock();
memcg = mem_cgroup_from_id(goal->memcg_id);
- rcu_read_unlock();
- if (!memcg) {
+ if (!memcg || !mem_cgroup_tryget(memcg)) {
+ rcu_read_unlock();
if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
return 0;
else /* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
return 10000;
}
+ rcu_read_unlock();
+
mem_cgroup_flush_stats(memcg);
lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
@@ -2065,6 +2096,8 @@ static unsigned long damos_get_node_memcg_used_bp(
used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);
+ mem_cgroup_put(memcg);
+
si_meminfo_node(&i, goal->nid);
if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
numerator = used_pages;
@@ -2751,13 +2784,13 @@ done:
if (ctx->ops.cleanup)
ctx->ops.cleanup(ctx);
kfree(ctx->regions_score_histogram);
+ kdamond_call(ctx, true);
pr_debug("kdamond (%d) finishes\n", current->pid);
mutex_lock(&ctx->kdamond_lock);
ctx->kdamond = NULL;
mutex_unlock(&ctx->kdamond_lock);
- kdamond_call(ctx, true);
damos_walk_cancel(ctx);
mutex_lock(&damon_lock);
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index 30d20f5b3192..3a699dcd5a7f 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -2152,13 +2152,13 @@ static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
return err;
err = damos_sysfs_set_dests(scheme);
if (err)
- goto put_access_pattern_out;
+ goto rmdir_put_access_pattern_out;
err = damon_sysfs_scheme_set_quotas(scheme);
if (err)
goto put_dests_out;
err = damon_sysfs_scheme_set_watermarks(scheme);
if (err)
- goto put_quotas_access_pattern_out;
+ goto rmdir_put_quotas_access_pattern_out;
err = damos_sysfs_set_filter_dirs(scheme);
if (err)
goto put_watermarks_quotas_access_pattern_out;
@@ -2183,13 +2183,15 @@ put_filters_watermarks_quotas_access_pattern_out:
put_watermarks_quotas_access_pattern_out:
kobject_put(&scheme->watermarks->kobj);
scheme->watermarks = NULL;
-put_quotas_access_pattern_out:
+rmdir_put_quotas_access_pattern_out:
+ damon_sysfs_quotas_rm_dirs(scheme->quotas);
kobject_put(&scheme->quotas->kobj);
scheme->quotas = NULL;
put_dests_out:
kobject_put(&scheme->dests->kobj);
scheme->dests = NULL;
-put_access_pattern_out:
+rmdir_put_access_pattern_out:
+ damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
kobject_put(&scheme->access_pattern->kobj);
scheme->access_pattern = NULL;
return err;
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index e2bd2d7becdd..95fd9375a7d8 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -792,7 +792,7 @@ static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
if (!nr_regions_range) {
err = -ENOMEM;
- goto put_intervals_out;
+ goto rmdir_put_intervals_out;
}
err = kobject_init_and_add(&nr_regions_range->kobj,
@@ -806,6 +806,8 @@ static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
put_nr_regions_intervals_out:
kobject_put(&nr_regions_range->kobj);
attrs->nr_regions_range = NULL;
+rmdir_put_intervals_out:
+ damon_sysfs_intervals_rm_dirs(intervals);
put_intervals_out:
kobject_put(&intervals->kobj);
attrs->intervals = NULL;
@@ -948,7 +950,7 @@ static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
err = damon_sysfs_context_set_targets(context);
if (err)
- goto put_attrs_out;
+ goto rmdir_put_attrs_out;
err = damon_sysfs_context_set_schemes(context);
if (err)
@@ -958,7 +960,8 @@ static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
put_targets_attrs_out:
kobject_put(&context->targets->kobj);
context->targets = NULL;
-put_attrs_out:
+rmdir_put_attrs_out:
+ damon_sysfs_attrs_rm_dirs(context->attrs);
kobject_put(&context->attrs->kobj);
context->attrs = NULL;
return err;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 51273baec9e5..e0ab14020513 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4286,6 +4286,11 @@ static int __init hugepages_setup(char *s)
unsigned long tmp;
char *p = s;
+ if (!hugepages_supported()) {
+ pr_warn("HugeTLB: hugepages unsupported, ignoring hugepages=%s cmdline\n", s);
+ return 0;
+ }
+
if (!parsed_valid_hugepagesz) {
pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
parsed_valid_hugepagesz = true;
@@ -4366,6 +4371,11 @@ static int __init hugepagesz_setup(char *s)
unsigned long size;
struct hstate *h;
+ if (!hugepages_supported()) {
+ pr_warn("HugeTLB: hugepages unsupported, ignoring hugepagesz=%s cmdline\n", s);
+ return 0;
+ }
+
parsed_valid_hugepagesz = false;
size = (unsigned long)memparse(s, NULL);
@@ -4414,6 +4424,12 @@ static int __init default_hugepagesz_setup(char *s)
unsigned long size;
int i;
+ if (!hugepages_supported()) {
+ pr_warn("HugeTLB: hugepages unsupported, ignoring default_hugepagesz=%s cmdline\n",
+ s);
+ return 0;
+ }
+
parsed_valid_hugepagesz = false;
if (parsed_default_hugepagesz) {
pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index e7f554a31bb4..9e1c5f2b7a41 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -207,7 +207,7 @@ void kmsan_free_page(struct page *page, unsigned int order)
if (!kmsan_enabled || kmsan_in_runtime())
return;
kmsan_enter_runtime();
- kmsan_internal_poison_memory(page_address(page), page_size(page),
+ kmsan_internal_poison_memory(page_address(page), PAGE_SIZE << order,
GFP_KERNEL & ~(__GFP_RECLAIM),
KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
kmsan_leave_runtime();
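
The fix matters because page_size() derives the size from the page's compound metadata, which may no longer describe the order-N block being freed, while the caller's order always does. A tiny illustration with assumed values:

/* Demonstration of the size mismatch, assuming 4 KiB pages and metadata
 * that was already reset to order 0 at free time.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned int order = 3;		 /* freeing 8 pages */
	unsigned int compound_order = 0; /* metadata already reset */

	printf("page_size() would give %lu bytes\n", PAGE_SIZE << compound_order);
	printf("PAGE_SIZE << order gives %lu bytes\n", PAGE_SIZE << order);
	return 0;
}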
diff --git a/mm/numa_memblks.c b/mm/numa_memblks.c
index 5b009a9cd8b4..8f5735fda0a2 100644
--- a/mm/numa_memblks.c
+++ b/mm/numa_memblks.c
@@ -7,6 +7,8 @@
#include <linux/numa.h>
#include <linux/numa_memblks.h>
+#include <asm/numa.h>
+
int numa_distance_cnt;
static u8 *numa_distance;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c380f063e8b7..f65c4edf199d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -167,6 +167,33 @@ static inline void __pcp_trylock_noop(unsigned long *flags) { }
pcp_trylock_finish(UP_flags); \
})
+/*
+ * With the UP spinlock implementation, when we spin_lock(&pcp->lock) (e.g. for
+ * a potentially remote cpu drain) and get interrupted by an operation that
+ * attempts pcp_spin_trylock(), we can't rely on the trylock failing, because
+ * UP spinlock assumptions make the trylock a no-op. So we have to turn that
+ * spin_lock() into a spin_lock_irqsave(). This works because on UP there are
+ * no remote cpus, so we can only be locking the single local one.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+static inline void __flags_noop(unsigned long *flags) { }
+#define pcp_spin_lock_maybe_irqsave(ptr, flags) \
+({ \
+ __flags_noop(&(flags)); \
+ spin_lock(&(ptr)->lock); \
+})
+#define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \
+({ \
+ spin_unlock(&(ptr)->lock); \
+ __flags_noop(&(flags)); \
+})
+#else
+#define pcp_spin_lock_maybe_irqsave(ptr, flags) \
+ spin_lock_irqsave(&(ptr)->lock, flags)
+#define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \
+ spin_unlock_irqrestore(&(ptr)->lock, flags)
+#endif
+
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
@@ -2556,6 +2583,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
{
int high_min, to_drain, to_drain_batched, batch;
+ unsigned long UP_flags;
bool todo = false;
high_min = READ_ONCE(pcp->high_min);
@@ -2575,9 +2603,9 @@ bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
to_drain = pcp->count - pcp->high;
while (to_drain > 0) {
to_drain_batched = min(to_drain, batch);
- spin_lock(&pcp->lock);
+ pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
free_pcppages_bulk(zone, to_drain_batched, pcp, 0);
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
todo = true;
to_drain -= to_drain_batched;
@@ -2594,14 +2622,15 @@ bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
*/
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
+ unsigned long UP_flags;
int to_drain, batch;
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0) {
- spin_lock(&pcp->lock);
+ pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
free_pcppages_bulk(zone, to_drain, pcp, 0);
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
}
}
#endif
@@ -2612,10 +2641,11 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
+ unsigned long UP_flags;
int count;
do {
- spin_lock(&pcp->lock);
+ pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
count = pcp->count;
if (count) {
int to_drain = min(count,
@@ -2624,7 +2654,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
free_pcppages_bulk(zone, to_drain, pcp, 0);
count -= to_drain;
}
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
} while (count);
}
@@ -6109,6 +6139,7 @@ static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
{
struct per_cpu_pages *pcp;
struct cpu_cacheinfo *cci;
+ unsigned long UP_flags;
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
cci = get_cpu_cacheinfo(cpu);
@@ -6119,12 +6150,12 @@ static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
* This can reduce zone lock contention without hurting
* cache-hot pages sharing.
*/
- spin_lock(&pcp->lock);
+ pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
pcp->flags |= PCPF_FREE_HIGH_BATCH;
else
pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
}
void setup_pcp_cacheinfo(unsigned int cpu)
@@ -6667,11 +6698,19 @@ static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *
int old_percpu_pagelist_high_fraction;
int ret;
+ /*
+ * Avoid using pcp_batch_high_lock for reads as the value is read
+ * atomically and a race with offlining is harmless.
+ */
+
+ if (!write)
+ return proc_dointvec_minmax(table, write, buffer, length, ppos);
+
mutex_lock(&pcp_batch_high_lock);
old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
- if (!write || ret < 0)
+ if (ret < 0)
goto out;
/* Sanity checking to avoid pcp imbalance */
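
A compile-time sketch of the maybe-irqsave macro pair introduced above: the SMP build keeps plain spin_lock() and only "touches" flags through a no-op helper so the variable stays used, while the UP build routes through the irqsave variants. All lock functions below are printf stubs, CONFIG_SMP is just a demo switch, and the signatures differ slightly from the kernel's macros:

/* Hedged mock of the pcp_spin_lock_maybe_irqsave() pattern. */
#include <stdio.h>

struct pcp { int lock; };

static void spin_lock(int *l)            { (void)l; printf("spin_lock\n"); }
static void spin_unlock(int *l)          { (void)l; printf("spin_unlock\n"); }
static void spin_lock_irqsave(int *l, unsigned long *f)     { (void)l; *f = 1; printf("lock+irqsave\n"); }
static void spin_unlock_irqrestore(int *l, unsigned long f) { (void)l; (void)f; printf("unlock+irqrestore\n"); }
static void flags_noop(unsigned long *f) { (void)f; }

#ifdef CONFIG_SMP
#define lock_maybe_irqsave(p, flags)      ({ flags_noop(&(flags)); spin_lock(&(p)->lock); })
#define unlock_maybe_irqrestore(p, flags) ({ spin_unlock(&(p)->lock); flags_noop(&(flags)); })
#else
#define lock_maybe_irqsave(p, flags)      spin_lock_irqsave(&(p)->lock, &(flags))
#define unlock_maybe_irqrestore(p, flags) spin_unlock_irqrestore(&(p)->lock, (flags))
#endif

int main(void)
{
	struct pcp pcp = { 0 };
	unsigned long flags;

	lock_maybe_irqsave(&pcp, flags);
	unlock_maybe_irqrestore(&pcp, flags);
	return 0;
}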
diff --git a/mm/vma.c b/mm/vma.c
index fc90befd162f..dc92f3dd8514 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -67,18 +67,13 @@ struct mmap_state {
.state = VMA_MERGE_START, \
}
-/*
- * If, at any point, the VMA had unCoW'd mappings from parents, it will maintain
- * more than one anon_vma_chain connecting it to more than one anon_vma. A merge
- * would mean a wider range of folios sharing the root anon_vma lock, and thus
- * potential lock contention, we do not wish to encourage merging such that this
- * scales to a problem.
- */
-static bool vma_had_uncowed_parents(struct vm_area_struct *vma)
+/* Was this VMA ever forked from a parent, i.e. maybe contains CoW mappings? */
+static bool vma_is_fork_child(struct vm_area_struct *vma)
{
/*
* The list_is_singular() test is to avoid merging VMA cloned from
- * parents. This can improve scalability caused by anon_vma lock.
+ * parents. This can improve scalability caused by the anon_vma root
+ * lock.
*/
return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain);
}
@@ -115,11 +110,19 @@ static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next)
VM_WARN_ON(src && src_anon != src->anon_vma);
/* Case 1 - we will dup_anon_vma() from src into tgt. */
- if (!tgt_anon && src_anon)
- return !vma_had_uncowed_parents(src);
+ if (!tgt_anon && src_anon) {
+ struct vm_area_struct *copied_from = vmg->copied_from;
+
+ if (vma_is_fork_child(src))
+ return false;
+ if (vma_is_fork_child(copied_from))
+ return false;
+
+ return true;
+ }
/* Case 2 - we will simply use tgt's anon_vma. */
if (tgt_anon && !src_anon)
- return !vma_had_uncowed_parents(tgt);
+ return !vma_is_fork_child(tgt);
/* Case 3 - the anon_vma's are already shared. */
return src_anon == tgt_anon;
}
@@ -829,6 +832,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
VM_WARN_ON_VMG(middle &&
!(vma_iter_addr(vmg->vmi) >= middle->vm_start &&
vma_iter_addr(vmg->vmi) < middle->vm_end), vmg);
+ /* An existing merge can never be used by the mremap() logic. */
+ VM_WARN_ON_VMG(vmg->copied_from, vmg);
vmg->state = VMA_MERGE_NOMERGE;
@@ -1099,6 +1104,33 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
}
/*
+ * vma_merge_copied_range - Attempt to merge a VMA that is being copied by
+ * mremap()
+ *
+ * @vmg: Describes the VMA we are adding, in the copied-to range @vmg->start to
+ * @vmg->end (exclusive), which we try to merge with any adjacent VMAs if
+ * possible.
+ *
+ * vmg->prev, next, start, end, pgoff should all be relative to the COPIED TO
+ * range, i.e. the target range for the VMA.
+ *
+ * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
+ * to the VMA we expanded.
+ *
+ * ASSUMPTIONS: Same as vma_merge_new_range(), except vmg->middle must contain
+ * the copied-from VMA.
+ */
+static struct vm_area_struct *vma_merge_copied_range(struct vma_merge_struct *vmg)
+{
+ /* We must have a copied-from VMA. */
+ VM_WARN_ON_VMG(!vmg->middle, vmg);
+
+ vmg->copied_from = vmg->middle;
+ vmg->middle = NULL;
+ return vma_merge_new_range(vmg);
+}
+
+/*
* vma_expand - Expand an existing VMA
*
* @vmg: Describes a VMA expansion operation.
@@ -1117,46 +1149,52 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
int vma_expand(struct vma_merge_struct *vmg)
{
struct vm_area_struct *anon_dup = NULL;
- bool remove_next = false;
struct vm_area_struct *target = vmg->target;
struct vm_area_struct *next = vmg->next;
+ bool remove_next = false;
vm_flags_t sticky_flags;
-
- sticky_flags = vmg->vm_flags & VM_STICKY;
- sticky_flags |= target->vm_flags & VM_STICKY;
-
- VM_WARN_ON_VMG(!target, vmg);
+ int ret = 0;
mmap_assert_write_locked(vmg->mm);
-
vma_start_write(target);
- if (next && (target != next) && (vmg->end == next->vm_end)) {
- int ret;
- sticky_flags |= next->vm_flags & VM_STICKY;
+ if (next && target != next && vmg->end == next->vm_end)
remove_next = true;
- /* This should already have been checked by this point. */
- VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
- vma_start_write(next);
- /*
- * In this case we don't report OOM, so vmg->give_up_on_mm is
- * safe.
- */
- ret = dup_anon_vma(target, next, &anon_dup);
- if (ret)
- return ret;
- }
+ /* We must have a target. */
+ VM_WARN_ON_VMG(!target, vmg);
+ /* This should have already been checked by this point. */
+ VM_WARN_ON_VMG(remove_next && !can_merge_remove_vma(next), vmg);
/* Not merging but overwriting any part of next is not handled. */
VM_WARN_ON_VMG(next && !remove_next &&
next != target && vmg->end > next->vm_start, vmg);
- /* Only handles expanding */
+ /* Only handles expanding. */
VM_WARN_ON_VMG(target->vm_start < vmg->start ||
target->vm_end > vmg->end, vmg);
+ sticky_flags = vmg->vm_flags & VM_STICKY;
+ sticky_flags |= target->vm_flags & VM_STICKY;
if (remove_next)
- vmg->__remove_next = true;
+ sticky_flags |= next->vm_flags & VM_STICKY;
+
+ /*
+ * If we are removing the next VMA or copying from a VMA
+ * (e.g. mremap()'ing), we must propagate anon_vma state.
+ *
+ * Note that, by convention, callers ignore OOM for this case, so
+ * we don't need to account for vmg->give_up_on_mm here.
+ */
+ if (remove_next)
+ ret = dup_anon_vma(target, next, &anon_dup);
+ if (!ret && vmg->copied_from)
+ ret = dup_anon_vma(target, vmg->copied_from, &anon_dup);
+ if (ret)
+ return ret;
+ if (remove_next) {
+ vma_start_write(next);
+ vmg->__remove_next = true;
+ }
if (commit_merge(vmg))
goto nomem;
@@ -1828,10 +1866,9 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
if (new_vma && new_vma->vm_start < addr + len)
return NULL; /* should never get here */
- vmg.middle = NULL; /* New VMA range. */
vmg.pgoff = pgoff;
vmg.next = vma_iter_next_rewind(&vmi, NULL);
- new_vma = vma_merge_new_range(&vmg);
+ new_vma = vma_merge_copied_range(&vmg);
if (new_vma) {
/*
diff --git a/mm/vma.h b/mm/vma.h
index abada6a64c4e..9d5ee6ac913a 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -106,6 +106,9 @@ struct vma_merge_struct {
struct anon_vma_name *anon_name;
enum vma_merge_state state;
+ /* If copied from (i.e. mremap()'d) the VMA from which we are copying. */
+ struct vm_area_struct *copied_from;
+
/* Flags which callers can use to modify merge behaviour: */
/*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 41dd01e8430c..628f96e83b11 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4248,7 +4248,7 @@ void *vzalloc_node_noprof(unsigned long size, int node)
EXPORT_SYMBOL(vzalloc_node_noprof);
/**
- * vrealloc_node_align_noprof - reallocate virtually contiguous memory; contents
+ * vrealloc_node_align - reallocate virtually contiguous memory; contents
* remain unchanged
* @p: object to reallocate memory for
* @size: the size to reallocate
diff --git a/mm/zswap.c b/mm/zswap.c
index 5d0f8b13a958..ac9b7a60736b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -787,7 +787,7 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
return 0;
fail:
- if (acomp)
+ if (!IS_ERR_OR_NULL(acomp))
crypto_free_acomp(acomp);
kfree(buffer);
return ret;
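
The one-line fix matters because crypto_alloc_acomp() reports failure with an ERR_PTR(), which is non-NULL, so `if (acomp)` would hand an error pointer to crypto_free_acomp(). A minimal userspace mock of the ERR_PTR convention; the helpers below are simplified stand-ins for the kernel's <linux/err.h>:

/* Hedged mock: error codes are encoded as the top MAX_ERRNO addresses. */
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }

static int IS_ERR_OR_NULL(const void *p)
{
	return !p || (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *acomp = ERR_PTR(-12);	/* -ENOMEM from the allocator */

	printf("if (acomp): %s\n",
	       acomp ? "true - would free a bogus pointer" : "false");
	printf("IS_ERR_OR_NULL(acomp): %s\n",
	       IS_ERR_OR_NULL(acomp) ? "true - skip the free" : "false");
	return 0;
}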
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index a9f5b1a68356..cbc3a75d7326 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -4420,6 +4420,7 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
if (bis_capable(hdev)) {
events[1] |= 0x20; /* LE PA Report */
events[1] |= 0x40; /* LE PA Sync Established */
+ events[1] |= 0x80; /* LE PA Sync Lost */
events[3] |= 0x04; /* LE Create BIG Complete */
events[3] |= 0x08; /* LE Terminate BIG Complete */
events[3] |= 0x10; /* LE BIG Sync Established */
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 655efac6f133..26cfcfdc45eb 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -1294,8 +1294,6 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
batch_size = NAPI_POLL_WEIGHT;
else if (batch_size > TEST_XDP_MAX_BATCH)
return -E2BIG;
-
- headroom += sizeof(struct xdp_page_head);
} else if (batch_size) {
return -EINVAL;
}
@@ -1308,16 +1306,26 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
/* There can't be user provided data before the meta data */
if (ctx->data_meta || ctx->data_end > kattr->test.data_size_in ||
ctx->data > ctx->data_end ||
- unlikely(xdp_metalen_invalid(ctx->data)) ||
(do_live && (kattr->test.data_out || kattr->test.ctx_out)))
goto free_ctx;
- /* Meta data is allocated from the headroom */
- headroom -= ctx->data;
meta_sz = ctx->data;
+ if (xdp_metalen_invalid(meta_sz) || meta_sz > headroom - sizeof(struct xdp_frame))
+ goto free_ctx;
+
+ /* Meta data is allocated from the headroom */
+ headroom -= meta_sz;
linear_sz = ctx->data_end;
}
+ /* The xdp_page_head structure takes up space in each page, limiting the
+ * size of the packet data; add the extra size to headroom here to make
+	 * sure it's accounted for in the length checks below, but not in the
+ * metadata size check above.
+ */
+ if (do_live)
+ headroom += sizeof(struct xdp_page_head);
+
max_linear_sz = PAGE_SIZE - headroom - tailroom;
linear_sz = min_t(u32, linear_sz, max_linear_sz);
@@ -1355,13 +1363,13 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (sinfo->nr_frags == MAX_SKB_FRAGS) {
ret = -ENOMEM;
- goto out;
+ goto out_put_dev;
}
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
- goto out;
+ goto out_put_dev;
}
frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1373,7 +1381,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (copy_from_user(page_address(page), data_in + size,
data_len)) {
ret = -EFAULT;
- goto out;
+ goto out_put_dev;
}
sinfo->xdp_frags_size += data_len;
size += data_len;
@@ -1388,6 +1396,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
else
ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
+out_put_dev:
/* We convert the xdp_buff back to an xdp_md before checking the return
* code so the reference count of any held netdevice will be decremented
* even if the test run failed.
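
A worked example of the rearranged headroom accounting: metadata is validated against headroom minus the xdp_frame, carved out of headroom, and only then is the per-page xdp_page_head added (live mode only) so it constrains the linear size but not the metadata check. All sizes below are illustrative stand-ins, not ABI values:

/* Hedged arithmetic demo of the test_run headroom flow with assumed sizes. */
#include <stdio.h>

#define PAGE_SIZE		4096u
#define XDP_PACKET_HEADROOM	256u	/* typical value */
#define FRAME_SZ		40u	/* hypothetical sizeof(struct xdp_frame) */
#define PAGE_HEAD_SZ		64u	/* hypothetical sizeof(struct xdp_page_head) */
#define TAILROOM		320u	/* hypothetical skb_shared_info reserve */

int main(void)
{
	unsigned int headroom = XDP_PACKET_HEADROOM;
	unsigned int meta_sz = 32;	/* ctx->data: metadata length */
	int do_live = 1;

	/* metadata must fit the headroom, minus space for the xdp_frame */
	if (meta_sz > headroom - FRAME_SZ)
		return 1;
	headroom -= meta_sz;		/* metadata is carved from headroom */
	if (do_live)
		headroom += PAGE_HEAD_SZ; /* live mode loses extra per-page space */

	printf("max_linear_sz = %u\n", PAGE_SIZE - headroom - TAILROOM);
	return 0;
}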
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 58d22e2b85fc..0501ffcb8a3d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -70,7 +70,7 @@ static inline int has_expired(const struct net_bridge *br,
{
return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
!test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
- time_before_eq(fdb->updated + hold_time(br), jiffies);
+ time_before_eq(READ_ONCE(fdb->updated) + hold_time(br), jiffies);
}
static int fdb_to_nud(const struct net_bridge *br,
@@ -126,9 +126,9 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags))
goto nla_put_failure;
- ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
+ ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
ci.ndm_confirmed = 0;
- ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
+ ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated));
ci.ndm_refcnt = 0;
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
@@ -551,7 +551,7 @@ void br_fdb_cleanup(struct work_struct *work)
*/
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
- unsigned long this_timer = f->updated + delay;
+ unsigned long this_timer = READ_ONCE(f->updated) + delay;
if (test_bit(BR_FDB_STATIC, &f->flags) ||
test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
@@ -924,6 +924,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
{
struct net_bridge_fdb_entry *f;
struct __fdb_entry *fe = buf;
+ unsigned long delta;
int num = 0;
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
@@ -953,8 +954,11 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
fe->port_hi = f->dst->port_no >> 8;
fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
- if (!test_bit(BR_FDB_STATIC, &f->flags))
- fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
+ if (!test_bit(BR_FDB_STATIC, &f->flags)) {
+ delta = jiffies - READ_ONCE(f->updated);
+ fe->ageing_timer_value =
+ jiffies_delta_to_clock_t(delta);
+ }
++fe;
++num;
}
@@ -1002,8 +1006,8 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
unsigned long now = jiffies;
bool fdb_modified = false;
- if (now != fdb->updated) {
- fdb->updated = now;
+ if (now != READ_ONCE(fdb->updated)) {
+ WRITE_ONCE(fdb->updated, now);
fdb_modified = __fdb_mark_active(fdb);
}
@@ -1242,10 +1246,10 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
if (fdb_handle_notify(fdb, notify))
modified = true;
- fdb->used = jiffies;
+ WRITE_ONCE(fdb->used, jiffies);
if (modified) {
if (refresh)
- fdb->updated = jiffies;
+ WRITE_ONCE(fdb->updated, jiffies);
fdb_notify(br, fdb, RTM_NEWNEIGH, true);
}
@@ -1556,7 +1560,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
goto err_unlock;
}
- fdb->updated = jiffies;
+ WRITE_ONCE(fdb->updated, jiffies);
if (READ_ONCE(fdb->dst) != p) {
WRITE_ONCE(fdb->dst, p);
@@ -1565,7 +1569,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
/* Refresh entry */
- fdb->used = jiffies;
+ WRITE_ONCE(fdb->used, jiffies);
} else {
modified = true;
}
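
The READ_ONCE()/WRITE_ONCE() annotations above mark fdb->updated and fdb->used as written under the bridge lock but read locklessly, keeping the compiler from tearing, caching, or re-reading the values. A small userspace sketch of the same pattern; the ONCE macros below are simplified stand-ins for the kernel's:

/* Hedged sketch of annotating a locklessly-read timestamp field. */
#include <stdio.h>

#define READ_ONCE(x)     (*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

static unsigned long jiffies = 1000;	/* mock clock */

struct fdb_entry { unsigned long updated; };

static void learn(struct fdb_entry *f)	/* writer path, "under the lock" */
{
	if (jiffies != READ_ONCE(f->updated))
		WRITE_ONCE(f->updated, jiffies);
}

static unsigned long age(struct fdb_entry *f)	/* lockless reader */
{
	return jiffies - READ_ONCE(f->updated);
}

int main(void)
{
	struct fdb_entry f = { .updated = 900 };

	learn(&f);
	printf("age: %lu jiffies\n", age(&f));
	return 0;
}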
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 777fa869c1a1..e355a15bf5ab 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -221,8 +221,8 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if (test_bit(BR_FDB_LOCAL, &dst->flags))
return br_pass_frame_up(skb, false);
- if (now != dst->used)
- dst->used = now;
+ if (now != READ_ONCE(dst->used))
+ WRITE_ONCE(dst->used, now);
br_forward(dst->dst, skb, local_rcv, false);
} else {
if (!mcast_hit)
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 613a911dda10..8656ab388c83 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1695,8 +1695,16 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
- if (session->transmission)
+ if (session->transmission) {
j1939_session_deactivate_activate_next(session);
+ } else if (session->state == J1939_SESSION_WAITING_ABORT) {
+ /* Force deactivation for the receiver.
+ * If we rely on the timer starting in j1939_session_cancel,
+ * a second RTS call here will cancel that timer and fail
+ * to restart it because the state is already WAITING_ABORT.
+ */
+ j1939_session_deactivate_activate_next(session);
+ }
return -EBUSY;
}
diff --git a/net/can/raw.c b/net/can/raw.c
index be1ef7cf4204..12293363413c 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -49,8 +49,8 @@
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
+#include <linux/can/can-ml.h>
#include <linux/can/core.h>
-#include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */
#include <linux/can/skb.h>
#include <linux/can/raw.h>
#include <net/sock.h>
@@ -892,58 +892,21 @@ static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
}
}
-static inline bool raw_dev_cc_enabled(struct net_device *dev,
- struct can_priv *priv)
-{
- /* The CANXL-only mode disables error-signalling on the CAN bus
- * which is needed to send CAN CC/FD frames
- */
- if (priv)
- return !can_dev_in_xl_only_mode(priv);
-
- /* virtual CAN interfaces always support CAN CC */
- return true;
-}
-
-static inline bool raw_dev_fd_enabled(struct net_device *dev,
- struct can_priv *priv)
-{
- /* check FD ctrlmode on real CAN interfaces */
- if (priv)
- return (priv->ctrlmode & CAN_CTRLMODE_FD);
-
- /* check MTU for virtual CAN FD interfaces */
- return (READ_ONCE(dev->mtu) >= CANFD_MTU);
-}
-
-static inline bool raw_dev_xl_enabled(struct net_device *dev,
- struct can_priv *priv)
-{
- /* check XL ctrlmode on real CAN interfaces */
- if (priv)
- return (priv->ctrlmode & CAN_CTRLMODE_XL);
-
- /* check MTU for virtual CAN XL interfaces */
- return can_is_canxl_dev_mtu(READ_ONCE(dev->mtu));
-}
-
static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb,
struct net_device *dev)
{
- struct can_priv *priv = safe_candev_priv(dev);
-
/* Classical CAN */
- if (can_is_can_skb(skb) && raw_dev_cc_enabled(dev, priv))
+ if (can_is_can_skb(skb) && can_cap_enabled(dev, CAN_CAP_CC))
return CAN_MTU;
/* CAN FD */
if (ro->fd_frames && can_is_canfd_skb(skb) &&
- raw_dev_fd_enabled(dev, priv))
+ can_cap_enabled(dev, CAN_CAP_FD))
return CANFD_MTU;
/* CAN XL */
if (ro->xl_frames && can_is_canxl_skb(skb) &&
- raw_dev_xl_enabled(dev, priv))
+ can_cap_enabled(dev, CAN_CAP_XL))
return CANXL_MTU;
return 0;
@@ -982,6 +945,12 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (!dev)
return -ENXIO;
+ /* no sending on a CAN device in read-only mode */
+ if (can_cap_enabled(dev, CAN_CAP_RO)) {
+ err = -EACCES;
+ goto put_dev;
+ }
+
skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
diff --git a/net/core/dev.c b/net/core/dev.c
index 36dc5199037e..ccef685023c2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -478,15 +478,21 @@ static const unsigned short netdev_lock_type[] = {
ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
+ ARPHRD_CAN, ARPHRD_MCTP,
ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
- ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
+ ARPHRD_RAWHDLC, ARPHRD_RAWIP,
+ ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
- ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
- ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
+ ARPHRD_IEEE80211_RADIOTAP,
+ ARPHRD_IEEE802154, ARPHRD_IEEE802154_MONITOR,
+ ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
+ ARPHRD_CAIF, ARPHRD_IP6GRE, ARPHRD_NETLINK, ARPHRD_6LOWPAN,
+ ARPHRD_VSOCKMON,
+ ARPHRD_VOID, ARPHRD_NONE};
static const char *const netdev_lock_name[] = {
"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -495,15 +501,21 @@ static const char *const netdev_lock_name[] = {
"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
+ "_xmit_CAN", "_xmit_MCTP",
"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
- "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
+ "_xmit_RAWHDLC", "_xmit_RAWIP",
+ "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
- "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
- "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
+ "_xmit_IEEE80211_RADIOTAP",
+ "_xmit_IEEE802154", "_xmit_IEEE802154_MONITOR",
+ "_xmit_PHONET", "_xmit_PHONET_PIPE",
+ "_xmit_CAIF", "_xmit_IP6GRE", "_xmit_NETLINK", "_xmit_6LOWPAN",
+ "_xmit_VSOCKMON",
+ "_xmit_VOID", "_xmit_NONE"};
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -516,6 +528,7 @@ static inline unsigned short netdev_lock_pos(unsigned short dev_type)
if (netdev_lock_type[i] == dev_type)
return i;
/* the last key is used by default */
+ WARN_ONCE(1, "netdev_lock_pos() could not find dev_type=%u\n", dev_type);
return ARRAY_SIZE(netdev_lock_type) - 1;
}
@@ -4190,8 +4203,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
do {
if (first_n && !defer_count) {
defer_count = atomic_long_inc_return(&q->defer_count);
- if (unlikely(defer_count > READ_ONCE(q->limit))) {
- kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_DROP);
+ if (unlikely(defer_count > READ_ONCE(net_hotdata.qdisc_max_burst))) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_BURST_DROP);
return NET_XMIT_DROP;
}
}
@@ -4209,7 +4222,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
ll_list = llist_del_all(&q->defer_list);
/* There is a small race because we clear defer_count not atomically
* with the prior llist_del_all(). This means defer_list could grow
- * over q->limit.
+ * over qdisc_max_burst.
*/
atomic_long_set(&q->defer_count, 0);
diff --git a/net/core/dst.c b/net/core/dst.c
index e9d35f49c9e7..1dae26c51ebe 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -68,6 +68,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
dst->lwtstate = NULL;
rcuref_init(&dst->__rcuref, 1);
INIT_LIST_HEAD(&dst->rt_uncached);
+ dst->rt_uncached_list = NULL;
dst->__use = 0;
dst->lastuse = jiffies;
dst->flags = flags;
diff --git a/net/core/hotdata.c b/net/core/hotdata.c
index dddd5c287cf0..a6db36580817 100644
--- a/net/core/hotdata.c
+++ b/net/core/hotdata.c
@@ -17,6 +17,7 @@ struct net_hotdata net_hotdata __cacheline_aligned = {
.tstamp_prequeue = 1,
.max_backlog = 1000,
+ .qdisc_max_burst = 1000,
.dev_tx_weight = 64,
.dev_rx_weight = 64,
.sysctl_max_skb_frags = MAX_SKB_FRAGS,
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 8d4decb2606f..05dd55cf8b58 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -430,6 +430,13 @@ static struct ctl_table net_core_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "qdisc_max_burst",
+ .data = &net_hotdata.qdisc_max_burst,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "netdev_rss_key",
.data = &netdev_rss_key,
.maxlen = sizeof(int),
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 05828d4cb6cd..abd77162f5e7 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -122,8 +122,8 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
- const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
- XFRM_MODE_SKB_CB(skb)->protocol);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x, xo->proto);
__be16 type = inner_mode->family == AF_INET6 ? htons(ETH_P_IPV6)
: htons(ETH_P_IP);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8178c44a3cdd..e13244729ad8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -891,10 +891,17 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
const void *daddr, const void *saddr, unsigned int len)
{
struct ip_tunnel *t = netdev_priv(dev);
- struct iphdr *iph;
struct gre_base_hdr *greh;
+ struct iphdr *iph;
+ int needed;
+
+ needed = t->hlen + sizeof(*iph);
+ if (skb_headroom(skb) < needed &&
+ pskb_expand_head(skb, HH_DATA_ALIGN(needed - skb_headroom(skb)),
+ 0, GFP_ATOMIC))
+ return -needed;
- iph = skb_push(skb, t->hlen + sizeof(*iph));
+ iph = skb_push(skb, needed);
greh = (struct gre_base_hdr *)(iph+1);
greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
greh->protocol = htons(type);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 158a30ae7c5f..50d0f5fe4e4c 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -1281,7 +1281,7 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
-int ip_tunnel_init(struct net_device *dev)
+int __ip_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
@@ -1308,10 +1308,9 @@ int ip_tunnel_init(struct net_device *dev)
if (tunnel->collect_md)
netif_keep_dst(dev);
- netdev_lockdep_set_classes(dev);
return 0;
}
-EXPORT_SYMBOL_GPL(ip_tunnel_init);
+EXPORT_SYMBOL_GPL(__ip_tunnel_init);
void ip_tunnel_uninit(struct net_device *dev)
{
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b549d6a57307..11d990703d31 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1537,9 +1537,9 @@ void rt_add_uncached_list(struct rtable *rt)
void rt_del_uncached_list(struct rtable *rt)
{
- if (!list_empty(&rt->dst.rt_uncached)) {
- struct uncached_list *ul = rt->dst.rt_uncached_list;
+ struct uncached_list *ul = rt->dst.rt_uncached_list;
+ if (ul) {
spin_lock_bh(&ul->lock);
list_del_init(&rt->dst.rt_uncached);
spin_unlock_bh(&ul->lock);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b66217d1b2f8..27ab9d7adc64 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3112,12 +3112,12 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
in6_ifa_hold(ifp);
read_unlock_bh(&idev->lock);
- ipv6_del_addr(ifp);
-
if (!(ifp->flags & IFA_F_TEMPORARY) &&
(ifp->flags & IFA_F_MANAGETEMPADDR))
delete_tempaddrs(idev, ifp);
+ ipv6_del_addr(ifp);
+
addrconf_verify_rtnl(net);
if (ipv6_addr_is_multicast(pfx)) {
ipv6_mc_config(net->ipv6.mc_autojoin_sk,
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 22410243ebe8..22895521a57d 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -158,8 +158,8 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
- const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
- XFRM_MODE_SKB_CB(skb)->protocol);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x, xo->proto);
__be16 type = inner_mode->family == AF_INET ? htons(ETH_P_IP)
: htons(ETH_P_IPV6);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6405072050e0..c1f39735a236 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -844,7 +844,7 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
skb_reset_network_header(skb);
- if (!pskb_inet_may_pull(skb)) {
+ if (skb_vlan_inet_prepare(skb, true)) {
DEV_STATS_INC(tunnel->dev, rx_length_errors);
DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a3e051dc66ee..e3a260a5564b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -148,9 +148,9 @@ void rt6_uncached_list_add(struct rt6_info *rt)
void rt6_uncached_list_del(struct rt6_info *rt)
{
- if (!list_empty(&rt->dst.rt_uncached)) {
- struct uncached_list *ul = rt->dst.rt_uncached_list;
+ struct uncached_list *ul = rt->dst.rt_uncached_list;
+ if (ul) {
spin_lock_bh(&ul->lock);
list_del_init(&rt->dst.rt_uncached);
spin_unlock_bh(&ul->lock);
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index f4013b547438..9d59090bbe93 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -529,8 +529,10 @@ set_change_agg:
return 0;
destroy_class:
- qdisc_put(cl->qdisc);
- kfree(cl);
+ if (!existing) {
+ qdisc_put(cl->qdisc);
+ kfree(cl);
+ }
return err;
}
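
The fix restricts the destroy_class error path to classes this call actually allocated; before it, a failed change on an existing class freed state the caller still owned. A small mock of that ownership rule (names are illustrative):

/* Hedged sketch: only tear down what this call created. */
#include <stdio.h>
#include <stdlib.h>

struct class_mock { int q; };

static int change_class(struct class_mock **clp, int should_fail)
{
	struct class_mock *cl = *clp;
	int existing = cl != NULL;

	if (!existing) {
		cl = calloc(1, sizeof(*cl));
		if (!cl)
			return -1;
	}
	if (should_fail)
		goto destroy_class;
	*clp = cl;
	return 0;

destroy_class:
	if (!existing)		/* never free a class we didn't allocate */
		free(cl);
	return -1;
}

int main(void)
{
	struct class_mock *cl = NULL;

	change_class(&cl, 0);	/* create */
	change_class(&cl, 1);	/* failed change must not free `cl` */
	printf("class still valid: %s\n", cl ? "yes" : "no");
	free(cl);
	return 0;
}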
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9e14e453b55c..98b362d51836 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -3151,6 +3151,7 @@ int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
int err;
if (family == AF_INET &&
+ (!x->dir || x->dir == XFRM_SA_DIR_OUT) &&
READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
x->props.flags |= XFRM_STATE_NOPMTUDISC;
diff --git a/rust/helpers/bitops.c b/rust/helpers/bitops.c
index 5d0861d29d3f..e79ef9e6d98f 100644
--- a/rust/helpers/bitops.c
+++ b/rust/helpers/bitops.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/find.h>
void rust_helper___set_bit(unsigned long nr, unsigned long *addr)
{
@@ -21,3 +22,44 @@ void rust_helper_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
clear_bit(nr, addr);
}
+
+/*
+ * The rust_helper_ prefix is intentionally omitted below so that the
+ * declarations in include/linux/find.h are compatible with these helpers.
+ *
+ * Note that the below #ifdefs mean that the helper is only created if C does
+ * not provide a definition.
+ */
+#ifdef find_first_zero_bit
+__rust_helper
+unsigned long _find_first_zero_bit(const unsigned long *p, unsigned long size)
+{
+ return find_first_zero_bit(p, size);
+}
+#endif /* find_first_zero_bit */
+
+#ifdef find_next_zero_bit
+__rust_helper
+unsigned long _find_next_zero_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
+#endif /* find_next_zero_bit */
+
+#ifdef find_first_bit
+__rust_helper
+unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ return find_first_bit(addr, size);
+}
+#endif /* find_first_bit */
+
+#ifdef find_next_bit
+__rust_helper
+unsigned long _find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+#endif /* find_next_bit */
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index a82dd155e1d3..b12df5b5ddfc 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1074,7 +1074,9 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
runtime->oss.params = 0;
runtime->oss.prepare = 1;
runtime->oss.buffer_used = 0;
- snd_pcm_runtime_buffer_set_silence(runtime);
+ err = snd_pcm_runtime_buffer_set_silence(runtime);
+ if (err < 0)
+ goto failure;
runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 68bee40c9ada..932a9bf98cbc 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -730,13 +730,18 @@ static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
}
/* fill the PCM buffer with the current silence format; called from pcm_oss.c */
-void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime)
+int snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime)
{
- snd_pcm_buffer_access_lock(runtime);
+ int err;
+
+ err = snd_pcm_buffer_access_lock(runtime);
+ if (err < 0)
+ return err;
if (runtime->dma_area)
snd_pcm_format_set_silence(runtime->format, runtime->dma_area,
bytes_to_samples(runtime, runtime->dma_bytes));
snd_pcm_buffer_access_unlock(runtime);
+ return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence);
diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c
index 61c7372e6307..29469e549791 100644
--- a/sound/hda/codecs/realtek/alc269.c
+++ b/sound/hda/codecs/realtek/alc269.c
@@ -6613,6 +6613,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8a2e, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8a30, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8a31, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8a34, "HP Pavilion x360 2-in-1 Laptop 14-ek0xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8a4f, "HP Victus 15-fa0xxx (MB 8A4F)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8a6e, "HP EDNA 360", ALC287_FIXUP_CS35L41_I2C_4),
SND_PCI_QUIRK(0x103c, 0x8a74, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
@@ -6817,6 +6818,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8f42, "HP ZBook 8 G2a 14W", ALC245_FIXUP_HP_TAS2781_I2C_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8f57, "HP Trekker G7JC", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8f62, "HP ZBook 8 G2a 16W", ALC245_FIXUP_HP_TAS2781_I2C_MUTE_LED),
+ SND_PCI_QUIRK(0x1043, 0x1024, "ASUS Zephyrus G14 2025", ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1032, "ASUS VivoBook X513EA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1034, "ASUS GU605C", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
diff --git a/sound/hda/codecs/side-codecs/cirrus_scodec_test.c b/sound/hda/codecs/side-codecs/cirrus_scodec_test.c
index 3cca750857b6..dc35932b6b22 100644
--- a/sound/hda/codecs/side-codecs/cirrus_scodec_test.c
+++ b/sound/hda/codecs/side-codecs/cirrus_scodec_test.c
@@ -103,6 +103,7 @@ static int cirrus_scodec_test_gpio_probe(struct platform_device *pdev)
/* GPIO core modifies our struct gpio_chip so use a copy */
gpio_priv->chip = cirrus_scodec_test_gpio_chip;
+ gpio_priv->chip.parent = &pdev->dev;
ret = devm_gpiochip_add_data(&pdev->dev, &gpio_priv->chip, gpio_priv);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to add gpiochip\n");
@@ -319,7 +320,7 @@ static struct kunit_case cirrus_scodec_test_cases[] = {
};
static struct kunit_suite cirrus_scodec_test_suite = {
- .name = "snd-hda-scodec-cs35l56-test",
+ .name = "snd-hda-cirrus-scodec-test",
.init = cirrus_scodec_test_case_init,
.test_cases = cirrus_scodec_test_cases,
};
diff --git a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c
index f7a7f216d586..624a822341bb 100644
--- a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c
+++ b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c
@@ -2,7 +2,7 @@
//
// TAS2781 HDA I2C driver
//
-// Copyright 2023 - 2025 Texas Instruments, Inc.
+// Copyright 2023 - 2026 Texas Instruments, Inc.
//
// Author: Shenghao Ding <shenghao-ding@ti.com>
// Current maintainer: Baojun Xu <baojun.xu@ti.com>
@@ -60,6 +60,7 @@ struct tas2781_hda_i2c_priv {
int (*save_calibration)(struct tas2781_hda *h);
int hda_chip_id;
+ bool skip_calibration;
};
static int tas2781_get_i2c_res(struct acpi_resource *ares, void *data)
@@ -491,7 +492,8 @@ static void tasdevice_dspfw_init(void *context)
/* If calibrated data occurs error, dsp will still works with default
* calibrated data inside algo.
*/
- hda_priv->save_calibration(tas_hda);
+ if (!hda_priv->skip_calibration)
+ hda_priv->save_calibration(tas_hda);
}
static void tasdev_fw_ready(const struct firmware *fmw, void *context)
@@ -548,6 +550,7 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
void *master_data)
{
struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ struct tas2781_hda_i2c_priv *hda_priv = tas_hda->hda_priv;
struct hda_component_parent *parent = master_data;
struct hda_component *comp;
struct hda_codec *codec;
@@ -568,11 +571,22 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
case 0x1028:
tas_hda->catlog_id = DELL;
break;
+ case 0x103C:
+ tas_hda->catlog_id = HP;
+ break;
default:
tas_hda->catlog_id = LENOVO;
break;
}
+ /*
+ * Using ASUS ROG Xbox Ally X (RC73XA) UEFI calibration data
+	 * causes audio dropouts during playback; use the fallback
+	 * data from the DSP firmware as a workaround.
+ */
+ if (codec->core.subsystem_id == 0x10431384)
+ hda_priv->skip_calibration = true;
+
pm_runtime_get_sync(dev);
comp->dev = dev;
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index bf4d9d336561..0294177acc66 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -420,6 +420,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6500RE"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "M6501RM"),
}
},
diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
index 443cf59cb71a..fdf4a9add852 100644
--- a/sound/soc/codecs/tlv320adcx140.c
+++ b/sound/soc/codecs/tlv320adcx140.c
@@ -23,7 +23,6 @@
#include "tlv320adcx140.h"
struct adcx140_priv {
- struct snd_soc_component *component;
struct regulator *supply_areg;
struct gpio_desc *gpio_reset;
struct regmap *regmap;
@@ -338,7 +337,7 @@ static const struct snd_kcontrol_new adcx140_dapm_ch4_dre_en_switch =
SOC_DAPM_SINGLE("Switch", ADCX140_CH4_CFG0, 0, 1, 0);
static const struct snd_kcontrol_new adcx140_dapm_dre_en_switch =
- SOC_DAPM_SINGLE("Switch", ADCX140_DSP_CFG1, 3, 1, 0);
+ SOC_DAPM_SINGLE("Switch", ADCX140_DSP_CFG1, 3, 1, 1);
/* Output Mixer */
static const struct snd_kcontrol_new adcx140_output_mixer_controls[] = {
@@ -699,7 +698,6 @@ static void adcx140_pwr_ctrl(struct adcx140_priv *adcx140, bool power_state)
{
int pwr_ctrl = 0;
int ret = 0;
- struct snd_soc_component *component = adcx140->component;
if (power_state)
pwr_ctrl = ADCX140_PWR_CFG_ADC_PDZ | ADCX140_PWR_CFG_PLL_PDZ;
@@ -711,7 +709,7 @@ static void adcx140_pwr_ctrl(struct adcx140_priv *adcx140, bool power_state)
ret = regmap_write(adcx140->regmap, ADCX140_PHASE_CALIB,
adcx140->phase_calib_on ? 0x00 : 0x40);
if (ret)
- dev_err(component->dev, "%s: register write error %d\n",
+ dev_err(adcx140->dev, "%s: register write error %d\n",
__func__, ret);
}
@@ -727,7 +725,7 @@ static int adcx140_hw_params(struct snd_pcm_substream *substream,
struct adcx140_priv *adcx140 = snd_soc_component_get_drvdata(component);
u8 data = 0;
- switch (params_width(params)) {
+ switch (params_physical_width(params)) {
case 16:
data = ADCX140_16_BIT_WORD;
break;
@@ -742,7 +740,7 @@ static int adcx140_hw_params(struct snd_pcm_substream *substream,
break;
default:
dev_err(component->dev, "%s: Unsupported width %d\n",
- __func__, params_width(params));
+ __func__, params_physical_width(params));
return -EINVAL;
}
@@ -1156,6 +1154,9 @@ static int adcx140_i2c_probe(struct i2c_client *i2c)
adcx140->gpio_reset = devm_gpiod_get_optional(adcx140->dev,
"reset", GPIOD_OUT_LOW);
if (IS_ERR(adcx140->gpio_reset))
+ return dev_err_probe(&i2c->dev, PTR_ERR(adcx140->gpio_reset),
+ "Failed to get Reset GPIO\n");
+ if (!adcx140->gpio_reset)
dev_info(&i2c->dev, "Reset GPIO not defined\n");
adcx140->supply_areg = devm_regulator_get_optional(adcx140->dev,
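
The move to params_physical_width() matters for formats whose container is wider than their payload: the codec's word-length field describes the slot on the wire, not the count of significant bits. Standard ALSA semantics for the common formats:

	/*
	 * format     params_width()   params_physical_width()
	 * S16_LE          16                16
	 * S24_3LE         24                24  (packed 3-byte slots)
	 * S24_LE          24                32  (24 bits in a 32-bit slot)
	 * S32_LE          32                32
	 */

So an S24_LE stream previously programmed a 24-bit word length against 32-bit slots; with the fix it selects the 32-bit setting.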
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index d7aca6567c2d..2fc234adca5f 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -678,6 +678,7 @@ struct wsa881x_priv {
*/
unsigned int sd_n_val;
int active_ports;
+ bool hw_init;
bool port_prepared[WSA881X_MAX_SWR_PORTS];
bool port_enable[WSA881X_MAX_SWR_PORTS];
};
@@ -687,6 +688,9 @@ static void wsa881x_init(struct wsa881x_priv *wsa881x)
struct regmap *rm = wsa881x->regmap;
unsigned int val = 0;
+ if (wsa881x->hw_init)
+ return;
+
regmap_register_patch(wsa881x->regmap, wsa881x_rev_2_0,
ARRAY_SIZE(wsa881x_rev_2_0));
@@ -724,6 +728,8 @@ static void wsa881x_init(struct wsa881x_priv *wsa881x)
regmap_update_bits(rm, WSA881X_OTP_REG_28, 0x3F, 0x3A);
regmap_update_bits(rm, WSA881X_BONGO_RESRV_REG1, 0xFF, 0xB2);
regmap_update_bits(rm, WSA881X_BONGO_RESRV_REG2, 0xFF, 0x05);
+
+ wsa881x->hw_init = true;
}
static int wsa881x_component_probe(struct snd_soc_component *comp)
@@ -1067,6 +1073,9 @@ static int wsa881x_update_status(struct sdw_slave *slave,
{
struct wsa881x_priv *wsa881x = dev_get_drvdata(&slave->dev);
+ if (status == SDW_SLAVE_UNATTACHED)
+ wsa881x->hw_init = false;
+
if (status == SDW_SLAVE_ATTACHED && slave->dev_num > 0)
wsa881x_init(wsa881x);
diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
index c3046e260cb9..468d2b38a22a 100644
--- a/sound/soc/codecs/wsa883x.c
+++ b/sound/soc/codecs/wsa883x.c
@@ -475,6 +475,7 @@ struct wsa883x_priv {
int active_ports;
int dev_mode;
int comp_offset;
+ bool hw_init;
/*
* Protects temperature reading code (related to speaker protection) and
* fields: temperature and pa_on.
@@ -1043,6 +1044,9 @@ static int wsa883x_init(struct wsa883x_priv *wsa883x)
struct regmap *regmap = wsa883x->regmap;
int variant, version, ret;
+ if (wsa883x->hw_init)
+ return 0;
+
ret = regmap_read(regmap, WSA883X_OTP_REG_0, &variant);
if (ret)
return ret;
@@ -1054,22 +1058,23 @@ static int wsa883x_init(struct wsa883x_priv *wsa883x)
switch (variant) {
case WSA8830:
- dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8830\n",
- version);
+ dev_dbg(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8830\n",
+ version);
break;
case WSA8835:
- dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8835\n",
- version);
+ dev_dbg(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8835\n",
+ version);
break;
case WSA8832:
- dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8832\n",
- version);
+ dev_dbg(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8832\n",
+ version);
break;
case WSA8835_V2:
- dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8835_V2\n",
- version);
+ dev_dbg(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8835_V2\n",
+ version);
break;
default:
+ dev_warn(wsa883x->dev, "unknown variant: %d\n", variant);
break;
}
@@ -1085,6 +1090,8 @@ static int wsa883x_init(struct wsa883x_priv *wsa883x)
wsa883x->comp_offset);
}
+ wsa883x->hw_init = true;
+
return 0;
}
@@ -1093,6 +1100,9 @@ static int wsa883x_update_status(struct sdw_slave *slave,
{
struct wsa883x_priv *wsa883x = dev_get_drvdata(&slave->dev);
+ if (status == SDW_SLAVE_UNATTACHED)
+ wsa883x->hw_init = false;
+
if (status == SDW_SLAVE_ATTACHED && slave->dev_num > 0)
return wsa883x_init(wsa883x);
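
wsa881x and wsa883x now follow the same re-enumeration pattern: hardware setup runs once per attach, and an UNATTACHED status invalidates it so the registers are rewritten when the part re-syncs. Reduced to its shape, driver names hypothetical:

	#include <linux/soundwire/sdw.h>

	struct my_amp_priv {
		bool hw_init;
	};

	static void my_amp_hw_init(struct my_amp_priv *amp);

	static int my_amp_update_status(struct sdw_slave *slave,
					enum sdw_slave_status status)
	{
		struct my_amp_priv *amp = dev_get_drvdata(&slave->dev);

		if (status == SDW_SLAVE_UNATTACHED) {
			/* Part dropped off the bus; its registers are now stale */
			amp->hw_init = false;
			return 0;
		}

		/* dev_num > 0 means enumeration has assigned a device number */
		if (status == SDW_SLAVE_ATTACHED && slave->dev_num > 0 &&
		    !amp->hw_init) {
			my_amp_hw_init(amp);	/* regmap patches, gains, OTP reads */
			amp->hw_init = true;
		}

		return 0;
	}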
diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c
index 887edd2be705..6c6b497657d0 100644
--- a/sound/soc/codecs/wsa884x.c
+++ b/sound/soc/codecs/wsa884x.c
@@ -1534,7 +1534,7 @@ static void wsa884x_init(struct wsa884x_priv *wsa884x)
wsa884x_set_gain_parameters(wsa884x);
- wsa884x->hw_init = false;
+ wsa884x->hw_init = true;
}
static int wsa884x_update_status(struct sdw_slave *slave,
@@ -2109,7 +2109,6 @@ static int wsa884x_probe(struct sdw_slave *pdev,
/* Start in cache-only until device is enumerated */
regcache_cache_only(wsa884x->regmap, true);
- wsa884x->hw_init = true;
if (IS_REACHABLE(CONFIG_HWMON)) {
struct device *hwmon;
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 355f7ec8943c..bdc02e85b089 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -1179,9 +1179,9 @@ void graph_util_parse_link_direction(struct device_node *np,
bool is_playback_only = of_property_read_bool(np, "playback-only");
bool is_capture_only = of_property_read_bool(np, "capture-only");
- if (playback_only)
+ if (np && playback_only)
*playback_only = is_playback_only;
- if (capture_only)
+ if (np && capture_only)
*capture_only = is_capture_only;
}
EXPORT_SYMBOL_GPL(graph_util_parse_link_direction);
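
With the added np checks the function leaves the output flags alone for a NULL node: of_property_read_bool(NULL, ...) already returns false safely, but without the guard that false would overwrite whatever the caller had preloaded. Expected caller-side behaviour under the fix, as a sketch:

	/* flags may already hold values from an earlier, valid node */
	bool playback_only = true;
	bool capture_only = false;

	graph_util_parse_link_direction(NULL, &playback_only, &capture_only);

	/* np == NULL: both flags untouched, playback_only is still true */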
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 2c1001148d54..8721a098d53f 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -767,6 +767,14 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
{
.callback = sof_sdw_quirk_cb,
.matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0DD6")
+ },
+ .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_ptlrvp"),
},
.driver_data = (void *)(SOC_SDW_PCH_DMIC),
diff --git a/sound/soc/sdw_utils/soc_sdw_cs42l43.c b/sound/soc/sdw_utils/soc_sdw_cs42l43.c
index 4c954501e500..2685ff4f0932 100644
--- a/sound/soc/sdw_utils/soc_sdw_cs42l43.c
+++ b/sound/soc/sdw_utils/soc_sdw_cs42l43.c
@@ -44,7 +44,7 @@ static const struct snd_soc_dapm_route cs42l43_dmic_map[] = {
static struct snd_soc_jack_pin soc_jack_pins[] = {
{
.pin = "Headphone",
- .mask = SND_JACK_HEADPHONE,
+ .mask = SND_JACK_HEADPHONE | SND_JACK_LINEOUT,
},
{
.pin = "Headset Mic",
diff --git a/sound/soc/sdw_utils/soc_sdw_utils.c b/sound/soc/sdw_utils/soc_sdw_utils.c
index bf382aa07e92..ccf149f949e8 100644
--- a/sound/soc/sdw_utils/soc_sdw_utils.c
+++ b/sound/soc/sdw_utils/soc_sdw_utils.c
@@ -841,6 +841,19 @@ struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_part(const u64 adr)
}
EXPORT_SYMBOL_NS(asoc_sdw_find_codec_info_part, "SND_SOC_SDW_UTILS");
+static struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_sdw_id(const struct sdw_slave_id *id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
+ if (id->part_id == codec_info_list[i].part_id &&
+ (!codec_info_list[i].version_id ||
+ id->sdw_version == codec_info_list[i].version_id))
+ return &codec_info_list[i];
+
+ return NULL;
+}
+
struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_acpi(const u8 *acpi_id)
{
int i;
@@ -873,22 +886,46 @@ struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_dai(const char *dai_name, i
}
EXPORT_SYMBOL_NS(asoc_sdw_find_codec_info_dai, "SND_SOC_SDW_UTILS");
+static int asoc_sdw_find_codec_info_dai_index(const struct asoc_sdw_codec_info *codec_info,
+ const char *dai_name)
+{
+ int i;
+
+ for (i = 0; i < codec_info->dai_num; i++) {
+ if (!strcmp(codec_info->dais[i].dai_name, dai_name))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
int asoc_sdw_rtd_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct snd_soc_dapm_context *dapm = snd_soc_card_to_dapm(card);
struct asoc_sdw_codec_info *codec_info;
struct snd_soc_dai *dai;
+ struct sdw_slave *sdw_peripheral;
const char *spk_components = "";
int dai_index;
int ret;
int i;
for_each_rtd_codec_dais(rtd, i, dai) {
- codec_info = asoc_sdw_find_codec_info_dai(dai->name, &dai_index);
+ if (is_sdw_slave(dai->component->dev))
+ sdw_peripheral = dev_to_sdw_dev(dai->component->dev);
+ else if (dai->component->dev->parent && is_sdw_slave(dai->component->dev->parent))
+ sdw_peripheral = dev_to_sdw_dev(dai->component->dev->parent);
+ else
+ continue;
+
+ codec_info = asoc_sdw_find_codec_info_sdw_id(&sdw_peripheral->id);
if (!codec_info)
return -EINVAL;
+ dai_index = asoc_sdw_find_codec_info_dai_index(codec_info, dai->name);
+ WARN_ON(dai_index < 0);
+
/*
* A codec dai can be connected to different dai links for capture and playback,
* but we only need to call the rtd_init function once.
@@ -898,6 +935,10 @@ int asoc_sdw_rtd_init(struct snd_soc_pcm_runtime *rtd)
if (codec_info->dais[dai_index].rtd_init_done)
continue;
+ dev_dbg(card->dev, "%#x/%s initializing for %s/%s\n",
+ codec_info->part_id, codec_info->dais[dai_index].dai_name,
+ dai->component->name, dai->name);
+
/*
* Add card controls and dapm widgets for the first codec dai.
* The controls and widgets will be used for all codec dais.
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 624e9269fc25..ba42939d5f01 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -543,11 +543,11 @@ int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
ucontrol->value.bytes.data[0] &= ~params->mask;
break;
case 2:
- ((u16 *)(&ucontrol->value.bytes.data))[0]
+ ((__be16 *)(&ucontrol->value.bytes.data))[0]
&= cpu_to_be16(~params->mask);
break;
case 4:
- ((u32 *)(&ucontrol->value.bytes.data))[0]
+ ((__be32 *)(&ucontrol->value.bytes.data))[0]
&= cpu_to_be32(~params->mask);
break;
default:
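
The casts are a sparse fix rather than a behavioural one: cpu_to_be16()/cpu_to_be32() return bitwise __be16/__be32 values, and masking them into plain u16/u32 lvalues mixes byte orders as far as sparse's __bitwise checking goes, even though the generated code is identical. In miniature (kernel headers assumed):

	u8 buf[4] = { };
	u16 mask = 0x00ff;

	/* clean under sparse: both operands are __be16 */
	((__be16 *)buf)[0] &= cpu_to_be16(~mask);

	/* warns under sparse: restricted __be16 stored through a u16 */
	((u16 *)buf)[0] &= cpu_to_be16(~mask);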
diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c
index 261d9067d27b..e795907a3963 100644
--- a/sound/soc/tegra/tegra210_ahub.c
+++ b/sound/soc/tegra/tegra210_ahub.c
@@ -2077,7 +2077,7 @@ static const struct regmap_config tegra210_ahub_regmap_config = {
.val_bits = 32,
.reg_stride = 4,
.max_register = TEGRA210_MAX_REGISTER_ADDR,
- .cache_type = REGCACHE_FLAT_S,
+ .cache_type = REGCACHE_FLAT,
};
static const struct regmap_config tegra186_ahub_regmap_config = {
@@ -2085,7 +2085,7 @@ static const struct regmap_config tegra186_ahub_regmap_config = {
.val_bits = 32,
.reg_stride = 4,
.max_register = TEGRA186_MAX_REGISTER_ADDR,
- .cache_type = REGCACHE_FLAT_S,
+ .cache_type = REGCACHE_FLAT,
};
static const struct regmap_config tegra264_ahub_regmap_config = {
@@ -2094,7 +2094,7 @@ static const struct regmap_config tegra264_ahub_regmap_config = {
.reg_stride = 4,
.writeable_reg = tegra264_ahub_wr_reg,
.max_register = TEGRA264_MAX_REGISTER_ADDR,
- .cache_type = REGCACHE_FLAT_S,
+ .cache_type = REGCACHE_FLAT,
};
static const struct tegra_ahub_soc_data soc_data_tegra210 = {
diff --git a/sound/soc/ti/davinci-evm.c b/sound/soc/ti/davinci-evm.c
index 3848766d96c3..ad514c2e5a25 100644
--- a/sound/soc/ti/davinci-evm.c
+++ b/sound/soc/ti/davinci-evm.c
@@ -194,27 +194,32 @@ static int davinci_evm_probe(struct platform_device *pdev)
return -EINVAL;
dai->cpus->of_node = of_parse_phandle(np, "ti,mcasp-controller", 0);
- if (!dai->cpus->of_node)
- return -EINVAL;
+ if (!dai->cpus->of_node) {
+ ret = -EINVAL;
+ goto err_put;
+ }
dai->platforms->of_node = dai->cpus->of_node;
evm_soc_card.dev = &pdev->dev;
ret = snd_soc_of_parse_card_name(&evm_soc_card, "ti,model");
if (ret)
- return ret;
+ goto err_put;
mclk = devm_clk_get(&pdev->dev, "mclk");
if (PTR_ERR(mclk) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto err_put;
} else if (IS_ERR(mclk)) {
dev_dbg(&pdev->dev, "mclk not found.\n");
mclk = NULL;
}
drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
drvdata->mclk = mclk;
@@ -224,7 +229,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
if (!drvdata->mclk) {
dev_err(&pdev->dev,
"No clock or clock rate defined.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put;
}
drvdata->sysclk = clk_get_rate(drvdata->mclk);
} else if (drvdata->mclk) {
@@ -240,8 +246,25 @@ static int davinci_evm_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(&evm_soc_card, drvdata);
ret = devm_snd_soc_register_card(&pdev->dev, &evm_soc_card);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+ goto err_put;
+ }
+
+ return ret;
+
+err_put:
+ dai->platforms->of_node = NULL;
+
+ if (dai->cpus->of_node) {
+ of_node_put(dai->cpus->of_node);
+ dai->cpus->of_node = NULL;
+ }
+
+ if (dai->codecs->of_node) {
+ of_node_put(dai->codecs->of_node);
+ dai->codecs->of_node = NULL;
+ }
return ret;
}
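
of_parse_phandle() returns its node with an elevated refcount, so every early return taken after a successful parse leaked it; routing all failures through one err_put label that drops the references and clears the pointers is the standard remedy. A minimal sketch of the idiom, with do_setup() purely hypothetical:

	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int my_probe(struct platform_device *pdev)
	{
		struct device_node *ctrl;
		int ret;

		/* takes a reference on success */
		ctrl = of_parse_phandle(pdev->dev.of_node, "some,controller", 0);
		if (!ctrl)
			return -EINVAL;

		ret = do_setup(pdev, ctrl);	/* hypothetical */
		if (ret)
			goto err_put;

		return 0;

	err_put:
		of_node_put(ctrl);	/* balance of_parse_phandle() */
		return ret;
	}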
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 54d01dfd820f..263abb36bb2d 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -1553,7 +1553,7 @@ static int prepare_playback_urb(struct snd_usb_substream *subs,
for (i = 0; i < ctx->packets; i++) {
counts = snd_usb_endpoint_next_packet_size(ep, ctx, i, avail);
- if (counts < 0)
+ if (counts < 0 || frames + counts >= ep->max_urb_frames)
break;
/* set up descriptor */
urb->iso_frame_desc[i].offset = frames * stride;
diff --git a/tools/net/ynl/pyynl/lib/doc_generator.py b/tools/net/ynl/pyynl/lib/doc_generator.py
index 3a16b8eb01ca..8b922d8f89e8 100644
--- a/tools/net/ynl/pyynl/lib/doc_generator.py
+++ b/tools/net/ynl/pyynl/lib/doc_generator.py
@@ -166,13 +166,13 @@ class YnlDocGenerator:
continue
lines.append(self.fmt.rst_paragraph(self.fmt.bold(key), level + 1))
if key in ['request', 'reply']:
- lines.append(self.parse_do_attributes(do_dict[key], level + 1) + "\n")
+ lines.append(self.parse_op_attributes(do_dict[key], level + 1) + "\n")
else:
lines.append(self.fmt.headroom(level + 2) + do_dict[key] + "\n")
return "\n".join(lines)
- def parse_do_attributes(self, attrs: Dict[str, Any], level: int = 0) -> str:
+ def parse_op_attributes(self, attrs: Dict[str, Any], level: int = 0) -> str:
"""Parse 'attributes' section"""
if "attributes" not in attrs:
return ""
@@ -184,7 +184,7 @@ class YnlDocGenerator:
def parse_operations(self, operations: List[Dict[str, Any]], namespace: str) -> str:
"""Parse operations block"""
- preprocessed = ["name", "doc", "title", "do", "dump", "flags"]
+ preprocessed = ["name", "doc", "title", "do", "dump", "flags", "event"]
linkable = ["fixed-header", "attribute-set"]
lines = []
@@ -217,6 +217,9 @@ class YnlDocGenerator:
if "dump" in operation:
lines.append(self.fmt.rst_paragraph(":dump:", 0))
lines.append(self.parse_do(operation["dump"], 0))
+ if "event" in operation:
+ lines.append(self.fmt.rst_paragraph(":event:", 0))
+ lines.append(self.parse_op_attributes(operation["event"], 0))
# New line after fields
lines.append("\n")
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
index ee94c281888a..26159e0499c7 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
@@ -47,6 +47,7 @@ void test_xdp_context_test_run(void)
struct test_xdp_context_test_run *skel = NULL;
char data[sizeof(pkt_v4) + sizeof(__u32)];
char bad_ctx[sizeof(struct xdp_md) + 1];
+ char large_data[256];
struct xdp_md ctx_in, ctx_out;
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
@@ -94,9 +95,6 @@ void test_xdp_context_test_run(void)
test_xdp_context_error(prog_fd, opts, 4, sizeof(__u32), sizeof(data),
0, 0, 0);
- /* Meta data must be 255 bytes or smaller */
- test_xdp_context_error(prog_fd, opts, 0, 256, sizeof(data), 0, 0, 0);
-
/* Total size of data must be data_end - data_meta or larger */
test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
sizeof(data) + 1, 0, 0, 0);
@@ -116,6 +114,16 @@ void test_xdp_context_test_run(void)
test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
0, 0, 1);
+ /* Meta data must be 216 bytes or smaller (256 - sizeof(struct
+ * xdp_frame)). Test both nearest invalid size and nearest invalid
+ * 4-byte-aligned size, and make sure data_in is large enough that we
+ * actually hit the check on metadata length
+ */
+ opts.data_in = large_data;
+ opts.data_size_in = sizeof(large_data);
+ test_xdp_context_error(prog_fd, opts, 0, 217, sizeof(large_data), 0, 0, 0);
+ test_xdp_context_error(prog_fd, opts, 0, 220, sizeof(large_data), 0, 0, 0);
+
test_xdp_context_test_run__destroy(skel);
}
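
The 216-byte limit follows from the metadata sharing a fixed 256-byte budget with struct xdp_frame, which the figure implies is 40 bytes on the builds in question:

	/*
	 * budget                    = 256
	 * sizeof(struct xdp_frame)  = 40  (implied by 256 - 216)
	 * max metadata              = 256 - 40 = 216
	 * smallest invalid size     = 217
	 * smallest invalid size
	 * that is 4-byte aligned    = 220
	 */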
diff --git a/tools/testing/selftests/drivers/net/hw/toeplitz.c b/tools/testing/selftests/drivers/net/hw/toeplitz.c
index d23b3b0c20a3..285bb17df9c2 100644
--- a/tools/testing/selftests/drivers/net/hw/toeplitz.c
+++ b/tools/testing/selftests/drivers/net/hw/toeplitz.c
@@ -485,8 +485,8 @@ static void parse_rps_bitmap(const char *arg)
bitmap = strtoul(arg, NULL, 0);
- if (bitmap & ~(RPS_MAX_CPUS - 1))
- error(1, 0, "rps bitmap 0x%lx out of bounds 0..%lu",
+ if (bitmap & ~((1UL << RPS_MAX_CPUS) - 1))
+ error(1, 0, "rps bitmap 0x%lx out of bounds, max cpu %lu",
bitmap, RPS_MAX_CPUS - 1);
for (i = 0; i < RPS_MAX_CPUS; i++)
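
The old expression bounded the wrong quantity: RPS_MAX_CPUS - 1 is the largest valid CPU index, not a mask of valid bitmap bits. Worked through with a hypothetical RPS_MAX_CPUS of 16:

	/*
	 * old: bitmap & ~(RPS_MAX_CPUS - 1)          == bitmap & ~0xf
	 *      -> rejects 0x10 (CPU 4 alone), which is valid
	 * new: bitmap & ~((1UL << RPS_MAX_CPUS) - 1) == bitmap & ~0xffff
	 *      -> accepts any combination of CPUs 0..15
	 */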
diff --git a/tools/testing/selftests/drivers/net/hw/toeplitz.py b/tools/testing/selftests/drivers/net/hw/toeplitz.py
index d2db5ee9e358..d288c57894f6 100755
--- a/tools/testing/selftests/drivers/net/hw/toeplitz.py
+++ b/tools/testing/selftests/drivers/net/hw/toeplitz.py
@@ -94,12 +94,14 @@ def _configure_rps(cfg, rps_cpus):
mask = 0
for cpu in rps_cpus:
mask |= (1 << cpu)
- mask = hex(mask)[2:]
+
+ mask = hex(mask)
# Set RPS bitmap for all rx queues
for rps_file in glob.glob(f"/sys/class/net/{cfg.ifname}/queues/rx-*/rps_cpus"):
with open(rps_file, "w", encoding="utf-8") as fp:
- fp.write(mask)
+        # sysfs expects hex without the '0x' prefix; toeplitz.c needs the prefix
+ fp.write(mask[2:])
return mask
diff --git a/tools/testing/selftests/kvm/x86/amx_test.c b/tools/testing/selftests/kvm/x86/amx_test.c
index f4ce5a185a7d..37b166260ee3 100644
--- a/tools/testing/selftests/kvm/x86/amx_test.c
+++ b/tools/testing/selftests/kvm/x86/amx_test.c
@@ -69,6 +69,12 @@ static inline void __tileloadd(void *tile)
: : "a"(tile), "d"(0));
}
+static inline int tileloadd_safe(void *tile)
+{
+ return kvm_asm_safe(".byte 0xc4,0xe2,0x7b,0x4b,0x04,0x10",
+ "a"(tile), "d"(0));
+}
+
static inline void __tilerelease(void)
{
asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::);
@@ -124,27 +130,52 @@ static void set_tilecfg(struct tile_config *cfg)
}
}
+enum {
+ /* Retrieve TMM0 from guest, stash it for TEST_RESTORE_TILEDATA */
+ TEST_SAVE_TILEDATA = 1,
+
+ /* Check TMM0 against tiledata */
+ TEST_COMPARE_TILEDATA = 2,
+
+ /* Restore TMM0 from earlier save */
+ TEST_RESTORE_TILEDATA = 4,
+
+ /* Full VM save/restore */
+ TEST_SAVE_RESTORE = 8,
+};
+
static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
struct tile_data *tiledata,
struct xstate *xstate)
{
+ int vector;
+
GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE) &&
this_cpu_has(X86_FEATURE_OSXSAVE));
check_xtile_info();
- GUEST_SYNC(1);
+ GUEST_SYNC(TEST_SAVE_RESTORE);
/* xfd=0, enable amx */
wrmsr(MSR_IA32_XFD, 0);
- GUEST_SYNC(2);
+ GUEST_SYNC(TEST_SAVE_RESTORE);
GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == 0);
set_tilecfg(amx_cfg);
__ldtilecfg(amx_cfg);
- GUEST_SYNC(3);
+ GUEST_SYNC(TEST_SAVE_RESTORE);
/* Check save/restore when trap to userspace */
__tileloadd(tiledata);
- GUEST_SYNC(4);
+ GUEST_SYNC(TEST_SAVE_TILEDATA | TEST_COMPARE_TILEDATA | TEST_SAVE_RESTORE);
+
+ /* xfd=0x40000, disable amx tiledata */
+ wrmsr(MSR_IA32_XFD, XFEATURE_MASK_XTILE_DATA);
+
+ /* host tries setting tiledata while guest XFD is set */
+ GUEST_SYNC(TEST_RESTORE_TILEDATA);
+ GUEST_SYNC(TEST_SAVE_RESTORE);
+
+ wrmsr(MSR_IA32_XFD, 0);
__tilerelease();
- GUEST_SYNC(5);
+ GUEST_SYNC(TEST_SAVE_RESTORE);
/*
* After XSAVEC, XTILEDATA is cleared in the xstate_bv but is set in
* the xcomp_bv.
@@ -154,6 +185,8 @@ static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
GUEST_ASSERT(!(xstate->header.xstate_bv & XFEATURE_MASK_XTILE_DATA));
GUEST_ASSERT(xstate->header.xcomp_bv & XFEATURE_MASK_XTILE_DATA);
+ /* #NM test */
+
/* xfd=0x40000, disable amx tiledata */
wrmsr(MSR_IA32_XFD, XFEATURE_MASK_XTILE_DATA);
@@ -166,32 +199,33 @@ static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
GUEST_ASSERT(!(xstate->header.xstate_bv & XFEATURE_MASK_XTILE_DATA));
GUEST_ASSERT((xstate->header.xcomp_bv & XFEATURE_MASK_XTILE_DATA));
- GUEST_SYNC(6);
+ GUEST_SYNC(TEST_SAVE_RESTORE);
GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
set_tilecfg(amx_cfg);
__ldtilecfg(amx_cfg);
- /* Trigger #NM exception */
- __tileloadd(tiledata);
- GUEST_SYNC(10);
- GUEST_DONE();
-}
+ /* Trigger #NM exception */
+ vector = tileloadd_safe(tiledata);
+ __GUEST_ASSERT(vector == NM_VECTOR,
+ "Wanted #NM on tileloadd with XFD[18]=1, got %s",
+ ex_str(vector));
-void guest_nm_handler(struct ex_regs *regs)
-{
- /* Check if #NM is triggered by XFEATURE_MASK_XTILE_DATA */
- GUEST_SYNC(7);
GUEST_ASSERT(!(get_cr0() & X86_CR0_TS));
GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILE_DATA);
GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
- GUEST_SYNC(8);
+ GUEST_SYNC(TEST_SAVE_RESTORE);
GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILE_DATA);
GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
/* Clear xfd_err */
wrmsr(MSR_IA32_XFD_ERR, 0);
/* xfd=0, enable amx */
wrmsr(MSR_IA32_XFD, 0);
- GUEST_SYNC(9);
+ GUEST_SYNC(TEST_SAVE_RESTORE);
+
+ __tileloadd(tiledata);
+ GUEST_SYNC(TEST_COMPARE_TILEDATA | TEST_SAVE_RESTORE);
+
+ GUEST_DONE();
}
int main(int argc, char *argv[])
@@ -200,10 +234,10 @@ int main(int argc, char *argv[])
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_x86_state *state;
+ struct kvm_x86_state *tile_state = NULL;
int xsave_restore_size;
vm_vaddr_t amx_cfg, tiledata, xstate;
struct ucall uc;
- u32 amx_offset;
int ret;
/*
@@ -228,9 +262,6 @@ int main(int argc, char *argv[])
vcpu_regs_get(vcpu, &regs1);
- /* Register #NM handler */
- vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);
-
/* amx cfg for guest_code */
amx_cfg = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize());
@@ -244,6 +275,7 @@ int main(int argc, char *argv[])
memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate);
+ int iter = 0;
for (;;) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
@@ -253,37 +285,47 @@ int main(int argc, char *argv[])
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
- switch (uc.args[1]) {
- case 1:
- case 2:
- case 3:
- case 5:
- case 6:
- case 7:
- case 8:
- fprintf(stderr, "GUEST_SYNC(%ld)\n", uc.args[1]);
- break;
- case 4:
- case 10:
- fprintf(stderr,
- "GUEST_SYNC(%ld), check save/restore status\n", uc.args[1]);
+ ++iter;
+ if (uc.args[1] & TEST_SAVE_TILEDATA) {
+ fprintf(stderr, "GUEST_SYNC #%d, save tiledata\n", iter);
+ tile_state = vcpu_save_state(vcpu);
+ }
+ if (uc.args[1] & TEST_COMPARE_TILEDATA) {
+ fprintf(stderr, "GUEST_SYNC #%d, check TMM0 contents\n", iter);
/* Compacted mode: the AMX offset is the xsave area
 * size minus the 8K AMX tile data size.
 */
- amx_offset = xsave_restore_size - NUM_TILES*TILE_SIZE;
- state = vcpu_save_state(vcpu);
- void *amx_start = (void *)state->xsave + amx_offset;
+ u32 amx_offset = xsave_restore_size - NUM_TILES*TILE_SIZE;
+ void *amx_start = (void *)tile_state->xsave + amx_offset;
void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
/* Only check TMM0 register, 1 tile */
ret = memcmp(amx_start, tiles_data, TILE_SIZE);
TEST_ASSERT(ret == 0, "memcmp failed, ret=%d", ret);
+ }
+ if (uc.args[1] & TEST_RESTORE_TILEDATA) {
+ fprintf(stderr, "GUEST_SYNC #%d, before KVM_SET_XSAVE\n", iter);
+ vcpu_xsave_set(vcpu, tile_state->xsave);
+ fprintf(stderr, "GUEST_SYNC #%d, after KVM_SET_XSAVE\n", iter);
+ }
+ if (uc.args[1] & TEST_SAVE_RESTORE) {
+ fprintf(stderr, "GUEST_SYNC #%d, save/restore VM state\n", iter);
+ state = vcpu_save_state(vcpu);
+ memset(&regs1, 0, sizeof(regs1));
+ vcpu_regs_get(vcpu, &regs1);
+
+ kvm_vm_release(vm);
+
+ /* Restore state in a new VM. */
+ vcpu = vm_recreate_with_one_vcpu(vm);
+ vcpu_load_state(vcpu, state);
kvm_x86_state_cleanup(state);
- break;
- case 9:
- fprintf(stderr,
- "GUEST_SYNC(%ld), #NM exception and enable amx\n", uc.args[1]);
- break;
+
+ memset(&regs2, 0, sizeof(regs2));
+ vcpu_regs_get(vcpu, &regs2);
+ TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
+ "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
+ (ulong) regs2.rdi, (ulong) regs2.rsi);
}
break;
case UCALL_DONE:
@@ -293,22 +335,6 @@ int main(int argc, char *argv[])
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
- state = vcpu_save_state(vcpu);
- memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vcpu, &regs1);
-
- kvm_vm_release(vm);
-
- /* Restore state in a new VM. */
- vcpu = vm_recreate_with_one_vcpu(vm);
- vcpu_load_state(vcpu, state);
- kvm_x86_state_cleanup(state);
-
- memset(&regs2, 0, sizeof(regs2));
- vcpu_regs_get(vcpu, &regs2);
- TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
- "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
- (ulong) regs2.rdi, (ulong) regs2.rsi);
}
done:
kvm_vm_free(vm);
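
The reworked sync protocol relies on the TEST_* values being distinct powers of two: one GUEST_SYNC can request several host-side actions, and the host replaces the old numbered switch with independent bit tests whose textual order fixes the action order. Condensed to its essentials:

	/* guest side: one sync, three requests */
	GUEST_SYNC(TEST_SAVE_TILEDATA | TEST_COMPARE_TILEDATA |
		   TEST_SAVE_RESTORE);

	/* host side */
	if (uc.args[1] & TEST_SAVE_TILEDATA)
		tile_state = vcpu_save_state(vcpu);
	if (uc.args[1] & TEST_COMPARE_TILEDATA) {
		/* memcmp() TMM0 in tile_state->xsave with guest memory */
	}
	if (uc.args[1] & TEST_SAVE_RESTORE) {
		/* vcpu_save_state() / vm_recreate / vcpu_load_state() */
	}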
diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c
index 6279893a0adc..f61150d28eb2 100644
--- a/tools/testing/selftests/mm/gup_longterm.c
+++ b/tools/testing/selftests/mm/gup_longterm.c
@@ -179,7 +179,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
if (rw && shared && fs_is_unknown(fs_type)) {
ksft_print_msg("Unknown filesystem\n");
result = KSFT_SKIP;
- return;
+ break;
}
/*
* R/O pinning or pinning in a private mapping is always
diff --git a/tools/testing/selftests/mm/merge.c b/tools/testing/selftests/mm/merge.c
index 363c1033cc7d..10b686102b79 100644
--- a/tools/testing/selftests/mm/merge.c
+++ b/tools/testing/selftests/mm/merge.c
@@ -22,12 +22,37 @@ FIXTURE(merge)
struct procmap_fd procmap;
};
+static char *map_carveout(unsigned int page_size)
+{
+ return mmap(NULL, 30 * page_size, PROT_NONE,
+ MAP_ANON | MAP_PRIVATE, -1, 0);
+}
+
+static pid_t do_fork(struct procmap_fd *procmap)
+{
+ pid_t pid = fork();
+
+ if (pid == -1)
+ return -1;
+ if (pid != 0) {
+ wait(NULL);
+ return pid;
+ }
+
+ /* Reopen for child. */
+ if (close_procmap(procmap))
+ return -1;
+ if (open_self_procmap(procmap))
+ return -1;
+
+ return 0;
+}
+
FIXTURE_SETUP(merge)
{
self->page_size = psize();
/* Carve out PROT_NONE region to map over. */
- self->carveout = mmap(NULL, 30 * self->page_size, PROT_NONE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ self->carveout = map_carveout(self->page_size);
ASSERT_NE(self->carveout, MAP_FAILED);
/* Setup PROCMAP_QUERY interface. */
ASSERT_EQ(open_self_procmap(&self->procmap), 0);
@@ -36,7 +61,8 @@ FIXTURE_SETUP(merge)
FIXTURE_TEARDOWN(merge)
{
ASSERT_EQ(munmap(self->carveout, 30 * self->page_size), 0);
- ASSERT_EQ(close_procmap(&self->procmap), 0);
+	/* May fail in the parent of a forked process. */
+ close_procmap(&self->procmap);
/*
* Clear unconditionally, as some tests set this. It is no issue if this
* fails (KSM may be disabled for instance).
@@ -44,6 +70,44 @@ FIXTURE_TEARDOWN(merge)
prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
}
+FIXTURE(merge_with_fork)
+{
+ unsigned int page_size;
+ char *carveout;
+ struct procmap_fd procmap;
+};
+
+FIXTURE_VARIANT(merge_with_fork)
+{
+ bool forked;
+};
+
+FIXTURE_VARIANT_ADD(merge_with_fork, forked)
+{
+ .forked = true,
+};
+
+FIXTURE_VARIANT_ADD(merge_with_fork, unforked)
+{
+ .forked = false,
+};
+
+FIXTURE_SETUP(merge_with_fork)
+{
+ self->page_size = psize();
+ self->carveout = map_carveout(self->page_size);
+ ASSERT_NE(self->carveout, MAP_FAILED);
+ ASSERT_EQ(open_self_procmap(&self->procmap), 0);
+}
+
+FIXTURE_TEARDOWN(merge_with_fork)
+{
+ ASSERT_EQ(munmap(self->carveout, 30 * self->page_size), 0);
+ ASSERT_EQ(close_procmap(&self->procmap), 0);
+ /* See above. */
+ prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
+}
+
TEST_F(merge, mprotect_unfaulted_left)
{
unsigned int page_size = self->page_size;
@@ -322,8 +386,8 @@ TEST_F(merge, forked_target_vma)
unsigned int page_size = self->page_size;
char *carveout = self->carveout;
struct procmap_fd *procmap = &self->procmap;
- pid_t pid;
char *ptr, *ptr2;
+ pid_t pid;
int i;
/*
@@ -344,19 +408,10 @@ TEST_F(merge, forked_target_vma)
*/
ptr[0] = 'x';
- pid = fork();
+ pid = do_fork(&self->procmap);
ASSERT_NE(pid, -1);
-
- if (pid != 0) {
- wait(NULL);
+ if (pid != 0)
return;
- }
-
- /* Child process below: */
-
- /* Reopen for child. */
- ASSERT_EQ(close_procmap(&self->procmap), 0);
- ASSERT_EQ(open_self_procmap(&self->procmap), 0);
/* unCOWing everything does not cause the AVC to go away. */
for (i = 0; i < 5 * page_size; i += page_size)
@@ -386,8 +441,8 @@ TEST_F(merge, forked_source_vma)
unsigned int page_size = self->page_size;
char *carveout = self->carveout;
struct procmap_fd *procmap = &self->procmap;
- pid_t pid;
char *ptr, *ptr2;
+ pid_t pid;
int i;
/*
@@ -408,19 +463,10 @@ TEST_F(merge, forked_source_vma)
*/
ptr[0] = 'x';
- pid = fork();
+ pid = do_fork(&self->procmap);
ASSERT_NE(pid, -1);
-
- if (pid != 0) {
- wait(NULL);
+ if (pid != 0)
return;
- }
-
- /* Child process below: */
-
- /* Reopen for child. */
- ASSERT_EQ(close_procmap(&self->procmap), 0);
- ASSERT_EQ(open_self_procmap(&self->procmap), 0);
/* unCOWing everything does not cause the AVC to go away. */
for (i = 0; i < 5 * page_size; i += page_size)
@@ -1171,4 +1217,288 @@ TEST_F(merge, mremap_correct_placed_faulted)
ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
+TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
+{
+ struct procmap_fd *procmap = &self->procmap;
+ unsigned int page_size = self->page_size;
+ unsigned long offset;
+ char *ptr_a, *ptr_b;
+
+ /*
+ * mremap() such that A and B merge:
+ *
+ * |------------|
+ * | \ |
+ * |-----------| | / |---------|
+ * | unfaulted | v \ | faulted |
+ * |-----------| / |---------|
+ * B \ A
+ */
+
+ /* Map VMA A into place. */
+ ptr_a = mmap(&self->carveout[page_size + 3 * page_size],
+ 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+ /* Fault it in. */
+ ptr_a[0] = 'x';
+
+ if (variant->forked) {
+ pid_t pid = do_fork(&self->procmap);
+
+ ASSERT_NE(pid, -1);
+ if (pid != 0)
+ return;
+ }
+
+ /*
+ * Now move it out of the way so we can place VMA B in position,
+ * unfaulted.
+ */
+ ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /* Map VMA B into place. */
+ ptr_b = mmap(&self->carveout[page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+ /*
+ * Now move VMA A into position with MREMAP_DONTUNMAP to catch incorrect
+ * anon_vma propagation.
+ */
+ ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+ &self->carveout[page_size + 3 * page_size]);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /* The VMAs should have merged, if not forked. */
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
+
+ offset = variant->forked ? 3 * page_size : 6 * page_size;
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + offset);
+}
+
+TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_next)
+{
+ struct procmap_fd *procmap = &self->procmap;
+ unsigned int page_size = self->page_size;
+ unsigned long offset;
+ char *ptr_a, *ptr_b;
+
+ /*
+ * mremap() such that A and B merge:
+ *
+ * |---------------------------|
+ * | \ |
+ * | |-----------| / |---------|
+ * v | unfaulted | \ | faulted |
+ * |-----------| / |---------|
+ * B \ A
+ *
+ * Then unmap VMA A to trigger the bug.
+ */
+
+ /* Map VMA A into place. */
+ ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+ /* Fault it in. */
+ ptr_a[0] = 'x';
+
+ if (variant->forked) {
+ pid_t pid = do_fork(&self->procmap);
+
+ ASSERT_NE(pid, -1);
+ if (pid != 0)
+ return;
+ }
+
+ /*
+ * Now move it out of the way so we can place VMA B in position,
+ * unfaulted.
+ */
+ ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /* Map VMA B into place. */
+ ptr_b = mmap(&self->carveout[page_size + 3 * page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+ /*
+ * Now move VMA A into position with MREMAP_DONTUNMAP to catch incorrect
+ * anon_vma propagation.
+ */
+ ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+ &self->carveout[page_size]);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /* The VMAs should have merged, if not forked. */
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
+ offset = variant->forked ? 3 * page_size : 6 * page_size;
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + offset);
+}
+
+TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev_unfaulted_next)
+{
+ struct procmap_fd *procmap = &self->procmap;
+ unsigned int page_size = self->page_size;
+ unsigned long offset;
+ char *ptr_a, *ptr_b, *ptr_c;
+
+ /*
+ * mremap() with MREMAP_DONTUNMAP such that A, B and C merge:
+ *
+ * |---------------------------|
+ * | \ |
+ * |-----------| | |-----------| / |---------|
+ * | unfaulted | v | unfaulted | \ | faulted |
+ * |-----------| |-----------| / |---------|
+ * A C \ B
+ */
+
+ /* Map VMA B into place. */
+ ptr_b = mmap(&self->carveout[page_size + 3 * page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+ /* Fault it in. */
+ ptr_b[0] = 'x';
+
+ if (variant->forked) {
+ pid_t pid = do_fork(&self->procmap);
+
+ ASSERT_NE(pid, -1);
+ if (pid != 0)
+ return;
+ }
+
+ /*
+ * Now move it out of the way so we can place VMAs A, C in position,
+ * unfaulted.
+ */
+ ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+ /* Map VMA A into place. */
+
+ ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /* Map VMA C into place. */
+ ptr_c = mmap(&self->carveout[page_size + 3 * page_size + 3 * page_size],
+ 3 * page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_c, MAP_FAILED);
+
+ /*
+ * Now move VMA B into position with MREMAP_DONTUNMAP to catch incorrect
+ * anon_vma propagation.
+ */
+ ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+ &self->carveout[page_size + 3 * page_size]);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+ /* The VMAs should have merged, if not forked. */
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
+ offset = variant->forked ? 3 * page_size : 9 * page_size;
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + offset);
+
+ /* If forked, B and C should also not have merged. */
+ if (variant->forked) {
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + 3 * page_size);
+ }
+}
+
+TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev_faulted_next)
+{
+ struct procmap_fd *procmap = &self->procmap;
+ unsigned int page_size = self->page_size;
+ char *ptr_a, *ptr_b, *ptr_bc;
+
+ /*
+ * mremap() with MREMAP_DONTUNMAP such that A, B and C merge:
+ *
+ * |---------------------------|
+ * | \ |
+ * |-----------| | |-----------| / |---------|
+ * | unfaulted | v | faulted | \ | faulted |
+ * |-----------| |-----------| / |---------|
+ * A C \ B
+ */
+
+ /*
+	 * Map VMAs B and C into place. We have to map them together so they
+	 * share an anon_vma and their vma->vm_pgoff values are correctly aligned.
+ */
+ ptr_bc = mmap(&self->carveout[page_size + 3 * page_size],
+ 3 * page_size + 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_bc, MAP_FAILED);
+
+ /* Fault it in. */
+ ptr_bc[0] = 'x';
+
+ if (variant->forked) {
+ pid_t pid = do_fork(&self->procmap);
+
+ ASSERT_NE(pid, -1);
+ if (pid != 0)
+ return;
+ }
+
+ /*
+ * Now move VMA B out the way (splitting VMA BC) so we can place VMA A
+ * in position, unfaulted, and leave the remainder of the VMA we just
+ * moved in place, faulted, as VMA C.
+ */
+ ptr_b = mremap(ptr_bc, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+ /* Map VMA A into place. */
+ ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /*
+ * Now move VMA B into position with MREMAP_DONTUNMAP to catch incorrect
+ * anon_vma propagation.
+ */
+ ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+ &self->carveout[page_size + 3 * page_size]);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+	/* The VMAs should have merged: A, B, C if unforked; B, C if forked. */
+ if (variant->forked) {
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + 6 * page_size);
+ } else {
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + 9 * page_size);
+ }
+}
+
TEST_HARNESS_MAIN
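
All four new merge_with_fork tests share one geometry: 3-page MAP_FIXED mappings placed at fixed slots inside the 30-page PROT_NONE carveout, with a high slot used to park the faulted VMA while its unfaulted neighbours are set up. Roughly (page offsets within the carveout; exact slot assignments vary per test):

	/*
	 * page 0        guard, never mapped over
	 * pages 1-3     first VMA slot
	 * pages 4-6     second VMA slot
	 * pages 7-9     third VMA slot (three-VMA tests only)
	 * pages 20-22   parking area for the mremap()'d faulted VMA
	 */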
diff --git a/tools/testing/vsock/util.c b/tools/testing/vsock/util.c
index d843643ced6b..9430ef5b8bc3 100644
--- a/tools/testing/vsock/util.c
+++ b/tools/testing/vsock/util.c
@@ -511,6 +511,18 @@ void run_tests(const struct test_case *test_cases,
printf("ok\n");
}
+
+ printf("All tests have been executed. Waiting other peer...");
+ fflush(stdout);
+
+ /*
+ * Final full barrier, to ensure that all tests have been run and
+ * that even the last one has been successful on both sides.
+ */
+ control_writeln("COMPLETED");
+ control_expectln("COMPLETED");
+
+ printf("ok\n");
}
void list_tests(const struct test_case *test_cases)