diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 8724c2c580b88..e88505e945d52 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5923,6 +5923,9 @@ rootflags= [KNL] Set root filesystem mount option string + initramfs_options= [KNL] + Specify mount options for the initramfs mount. + rootfstype= [KNL] Set root filesystem type rootwait [KNL] Wait (indefinitely) for root device to show up. diff --git a/Documentation/admin-guide/laptops/lg-laptop.rst b/Documentation/admin-guide/laptops/lg-laptop.rst index 67fd6932cef4f..c4dd534f91edd 100644 --- a/Documentation/admin-guide/laptops/lg-laptop.rst +++ b/Documentation/admin-guide/laptops/lg-laptop.rst @@ -48,8 +48,8 @@ This value is reset to 100 when the kernel boots. Fan mode -------- -Writing 1/0 to /sys/devices/platform/lg-laptop/fan_mode disables/enables -the fan silent mode. +Writing 0/1/2 to /sys/devices/platform/lg-laptop/fan_mode sets fan mode to +Optimal/Silent/Performance respectively. USB charge diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst index b42fea07c5cec..b6dacd012539a 100644 --- a/Documentation/arch/arm64/silicon-errata.rst +++ b/Documentation/arch/arm64/silicon-errata.rst @@ -198,6 +198,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-V3AE | #3312417 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-500 | #841119,826419 | N/A | +----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-600 | #1076982,1209401| N/A | diff --git a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml index 5ac994b3c0aa1..b304bc5a08c40 100644 --- a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml +++ b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml @@ -57,11 +57,24 @@ required: - clocks - clock-names - '#phy-cells' - - power-domains - resets - reset-names - rockchip,grf +allOf: + - if: + properties: + compatible: + contains: + enum: + - rockchip,px30-csi-dphy + - rockchip,rk1808-csi-dphy + - rockchip,rk3326-csi-dphy + - rockchip,rk3368-csi-dphy + then: + required: + - power-domains + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml index 9ea1e4cd0709c..69be6affa9b53 100644 --- a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml +++ b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml @@ -85,13 +85,21 @@ required: - reg - "#address-cells" - "#size-cells" - - dma-ranges - ranges - clocks - clock-names - interrupts - power-domains +allOf: + - if: + properties: + compatible: + const: fsl,imx8mp-dwc3 + then: + required: + - dma-ranges + additionalProperties: false examples: diff --git a/Documentation/networking/seg6-sysctl.rst b/Documentation/networking/seg6-sysctl.rst index 07c20e470bafe..1b6af4779be11 100644 --- a/Documentation/networking/seg6-sysctl.rst +++ b/Documentation/networking/seg6-sysctl.rst @@ -25,6 +25,9 @@ seg6_require_hmac - INTEGER Default is 0.
+/proc/sys/net/ipv6/seg6_* variables: +==================================== + seg6_flowlabel - INTEGER Controls the behaviour of computing the flowlabel of outer IPv6 header in case of SR T.encaps diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py index 5911bd0d79657..51a92b3718728 100644 --- a/Documentation/sphinx/kernel_abi.py +++ b/Documentation/sphinx/kernel_abi.py @@ -42,9 +42,11 @@ from docutils import nodes, statemachine from docutils.statemachine import ViewList from docutils.parsers.rst import directives, Directive -from docutils.utils.error_reporting import ErrorString from sphinx.util.docutils import switch_source_input +def ErrorString(exc): # Shamelessly stolen from docutils + return f'{exc.__class__.__name__}: {exc}' + __version__ = '1.0' def setup(app): diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py index 03ace5f01b5c0..2db63dd203990 100644 --- a/Documentation/sphinx/kernel_feat.py +++ b/Documentation/sphinx/kernel_feat.py @@ -40,9 +40,11 @@ from docutils import nodes, statemachine from docutils.statemachine import ViewList from docutils.parsers.rst import directives, Directive -from docutils.utils.error_reporting import ErrorString from sphinx.util.docutils import switch_source_input +def ErrorString(exc): # Shamelessly stolen from docutils + return f'{exc.__class__.__name__}: {exc}' + __version__ = '1.0' def setup(app): diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py index 6387624423363..ccbddcc4af792 100755 --- a/Documentation/sphinx/kernel_include.py +++ b/Documentation/sphinx/kernel_include.py @@ -34,13 +34,15 @@ import os.path from docutils import io, nodes, statemachine -from docutils.utils.error_reporting import SafeString, ErrorString from docutils.parsers.rst import directives from docutils.parsers.rst.directives.body import CodeBlock, NumberLines from docutils.parsers.rst.directives.misc import Include __version__ = '1.0' +def ErrorString(exc): # Shamelessly stolen from docutils + return f'{exc.__class__.__name__}: {exc}' + # ============================================================================== def setup(app): # ============================================================================== @@ -111,7 +113,7 @@ def _run(self): raise self.severe('Problems with "%s" directive path:\n' 'Cannot encode input file path "%s" ' '(wrong locale?).' % - (self.name, SafeString(path))) + (self.name, path)) except IOError as error: raise self.severe('Problems with "%s" directive path:\n%s.' % (self.name, ErrorString(error))) diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py index dcad0fff4723e..496489d634c4f 100755 --- a/Documentation/sphinx/maintainers_include.py +++ b/Documentation/sphinx/maintainers_include.py @@ -22,10 +22,12 @@ import os.path from docutils import statemachine -from docutils.utils.error_reporting import ErrorString from docutils.parsers.rst import Directive from docutils.parsers.rst.directives.misc import Include +def ErrorString(exc): # Shamelessly stolen from docutils + return f'{exc.__class__.__name__}: {exc}' + __version__ = '1.0' def setup(app): diff --git a/Documentation/trace/histogram-design.rst b/Documentation/trace/histogram-design.rst index 5765eb3e9efa7..a30f4bed11b4e 100644 --- a/Documentation/trace/histogram-design.rst +++ b/Documentation/trace/histogram-design.rst @@ -380,7 +380,9 @@ entry, ts0, corresponding to the ts0 variable in the sched_waking trigger above.
sched_waking histogram -----------------------:: +---------------------- + +.. code-block:: +------------------+ | hist_data |<-------------------------------------------------------+ diff --git a/Makefile b/Makefile index 66ae67c52da81..9ded15dbcf3d7 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 12 -SUBLEVEL = 49 +SUBLEVEL = 57 EXTRAVERSION = NAME = Baby Opossum Posse @@ -1372,11 +1372,11 @@ endif tools/: FORCE $(Q)mkdir -p $(objtree)/tools - $(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ + $(Q)$(MAKE) O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ tools/%: FORCE $(Q)mkdir -p $(objtree)/tools - $(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* + $(Q)$(MAKE) O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $* # --------------------------------------------------------------------------- # Kernel selftest diff --git a/arch/Kconfig b/arch/Kconfig index bd9f095d69fa0..593452b43dd49 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -861,6 +861,7 @@ config HAVE_CFI_ICALL_NORMALIZE_INTEGERS_RUSTC def_bool y depends on HAVE_CFI_ICALL_NORMALIZE_INTEGERS_CLANG depends on RUSTC_VERSION >= 107900 + depends on ARM64 || X86_64 # With GCOV/KASAN we need this fix: https://github.com/rust-lang/rust/pull/129373 depends on (RUSTC_LLVM_VERSION >= 190103 && RUSTC_VERSION >= 108200) || \ (!GCOV_KERNEL && !KASAN_GENERIC && !KASAN_SW_TAGS) diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c index e9dad60b147f3..1ebb058904992 100644 --- a/arch/alpha/kernel/asm-offsets.c +++ b/arch/alpha/kernel/asm-offsets.c @@ -4,6 +4,7 @@ * This code generates raw asm output which is post-processed to extract * and format the required data. */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c index f77deb7991757..2978da85fcb65 100644 --- a/arch/arc/kernel/asm-offsets.c +++ b/arch/arc/kernel/asm-offsets.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. 
(www.synopsys.com) */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_sodia.dts b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_sodia.dts index ce0d6514eeb57..e4794ccb8e413 100644 --- a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_sodia.dts +++ b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_sodia.dts @@ -66,8 +66,10 @@ mdio0 { #address-cells = <1>; #size-cells = <0>; - phy0: ethernet-phy@0 { - reg = <0>; + compatible = "snps,dwmac-mdio"; + + phy0: ethernet-phy@4 { + reg = <4>; rxd0-skew-ps = <0>; rxd1-skew-ps = <0>; rxd2-skew-ps = <0>; diff --git a/arch/arm/boot/dts/marvell/kirkwood-openrd-client.dts b/arch/arm/boot/dts/marvell/kirkwood-openrd-client.dts index d4e0b8150a84c..cf26e2ceaaa07 100644 --- a/arch/arm/boot/dts/marvell/kirkwood-openrd-client.dts +++ b/arch/arm/boot/dts/marvell/kirkwood-openrd-client.dts @@ -38,7 +38,7 @@ simple-audio-card,mclk-fs = <256>; simple-audio-card,cpu { - sound-dai = <&audio0 0>; + sound-dai = <&audio0>; }; simple-audio-card,codec { diff --git a/arch/arm/boot/dts/renesas/r8a7791-porter.dts b/arch/arm/boot/dts/renesas/r8a7791-porter.dts index 93c86e9216455..b255eb228dd74 100644 --- a/arch/arm/boot/dts/renesas/r8a7791-porter.dts +++ b/arch/arm/boot/dts/renesas/r8a7791-porter.dts @@ -290,7 +290,7 @@ }; can0_pins: can0 { - groups = "can0_data"; + groups = "can0_data_b"; function = "can0"; }; diff --git a/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi b/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi index a4beb718559c4..9ee9e7a1343c4 100644 --- a/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi +++ b/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi @@ -270,7 +270,7 @@ vcc7-supply = <&vbat>; vccio-supply = <&vbat>; - ti,en-ck32k-xtal = <1>; + ti,en-ck32k-xtal; regulators { vrtc_reg: regulator@0 { diff --git a/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts b/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts index 06767ea164b59..ece7f7854f6aa 100644 --- a/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts +++ b/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts @@ -483,8 +483,6 @@ status = "okay"; op-mode = <0>; /* MCASP_IIS_MODE */ tdm-slots = <2>; - /* 16 serializers */ - num-serializer = <16>; serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */ 0 0 2 1 0 0 0 0 0 0 0 0 0 0 0 0 >; diff --git a/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi b/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi index a7f99ae0c1fe9..78c657429f641 100644 --- a/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi +++ b/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi @@ -65,7 +65,7 @@ ti,debounce-max = /bits/ 16 <10>; ti,debounce-tol = /bits/ 16 <5>; ti,debounce-rep = /bits/ 16 <1>; - ti,keep-vref-on = <1>; + ti,keep-vref-on; ti,settle-delay-usec = /bits/ 16 <150>; wakeup-source; diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 4853875740d0f..d9f129c584b1d 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -7,6 +7,8 @@ * This code generates raw asm output which is post-processed to extract * and format the required data. 
*/ +#define COMPILE_OFFSETS + #include #include #include diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S index e5869cca5e791..94dece1839af3 100644 --- a/arch/arm/mach-at91/pm_suspend.S +++ b/arch/arm/mach-at91/pm_suspend.S @@ -872,7 +872,7 @@ e_done: /** * at91_mckx_ps_restore: restore MCK1..4 settings * - * Side effects: overwrites tmp1, tmp2 + * Side effects: overwrites tmp1, tmp2 and tmp3 */ .macro at91_mckx_ps_restore #ifdef CONFIG_SOC_SAMA7 @@ -916,7 +916,7 @@ r_ps: bic tmp3, tmp3, #AT91_PMC_MCR_V2_ID_MSK orr tmp3, tmp3, tmp1 orr tmp3, tmp3, #AT91_PMC_MCR_V2_CMD - str tmp2, [pmc, #AT91_PMC_MCR_V2] + str tmp3, [pmc, #AT91_PMC_MCR_V2] wait_mckrdy tmp1 diff --git a/arch/arm/mach-omap2/am33xx-restart.c b/arch/arm/mach-omap2/am33xx-restart.c index fcf3d557aa786..3cdf223addcc2 100644 --- a/arch/arm/mach-omap2/am33xx-restart.c +++ b/arch/arm/mach-omap2/am33xx-restart.c @@ -2,12 +2,46 @@ /* * am33xx-restart.c - Code common to all AM33xx machines. */ +#include +#include #include #include #include "common.h" +#include "control.h" #include "prm.h" +/* + * Advisory 1.0.36 EMU0 and EMU1: Terminals Must be Pulled High Before + * ICEPick Samples + * + * If the EMU0/EMU1 pins have been used as GPIO outputs and are actively + * driving a low level, the device might not reboot in normal mode. We are in + * a bad position to override the GPIO state here, so just switch the pins + * into EMU input mode (that's what reset will do anyway) and wait a bit, + * because the state will be latched 190 ns after reset. + */ +static void am33xx_advisory_1_0_36(void) +{ + u32 emu0 = omap_ctrl_readl(AM335X_PIN_EMU0); + u32 emu1 = omap_ctrl_readl(AM335X_PIN_EMU1); + + /* If both pins are in EMU mode, nothing to do */ + if (!(emu0 & 7) && !(emu1 & 7)) + return; + + /* Switch GPIO3_7/GPIO3_8 into EMU0/EMU1 modes respectively */ + omap_ctrl_writel(emu0 & ~7, AM335X_PIN_EMU0); + omap_ctrl_writel(emu1 & ~7, AM335X_PIN_EMU1); + + /* + * Give the pull-ups time to charge the pin/PCB trace capacitance. + * 5 ms should be enough to charge 1 uF (which would be a huge capacitance + * for these pins) with the TI-recommended 4k7 external pull-ups.
*/ + mdelay(5); +} + /** * am33xx_restart - trigger a software restart of the SoC * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c @@ -18,6 +52,8 @@ */ void am33xx_restart(enum reboot_mode mode, const char *cmd) { + am33xx_advisory_1_0_36(); + /* TODO: Handle cmd if necessary */ prm_reboot_mode = mode; diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c index c907478be196e..4abb86dc98fda 100644 --- a/arch/arm/mach-omap2/pm33xx-core.c +++ b/arch/arm/mach-omap2/pm33xx-core.c @@ -388,12 +388,15 @@ static int __init amx3_idle_init(struct device_node *cpu_node, int cpu) if (!state_node) break; - if (!of_device_is_available(state_node)) + if (!of_device_is_available(state_node)) { + of_node_put(state_node); continue; + } if (i == CPUIDLE_STATE_MAX) { pr_warn("%s: cpuidle states reached max possible\n", __func__); + of_node_put(state_node); break; } @@ -403,6 +406,7 @@ static int __init amx3_idle_init(struct device_node *cpu_node, int cpu) states[state_count].wfi_flags |= WFI_FLAG_WAKE_M3 | WFI_FLAG_FLUSH_CACHE; + of_node_put(state_node); state_count++; } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7887d18cce3e4..40ae4dd961b15 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1111,6 +1111,7 @@ config ARM64_ERRATUM_3194386 * ARM Neoverse-V1 erratum 3324341 * ARM Neoverse V2 erratum 3324336 * ARM Neoverse-V3 erratum 3312417 + * ARM Neoverse-V3AE erratum 3312417 On affected cores "MSR SSBS, #0" instructions may not affect subsequent speculative instructions, which may permit unexpected diff --git a/arch/arm64/boot/dts/apple/t8103-j457.dts b/arch/arm64/boot/dts/apple/t8103-j457.dts index 152f95fd49a21..7089ccf3ce556 100644 --- a/arch/arm64/boot/dts/apple/t8103-j457.dts +++ b/arch/arm64/boot/dts/apple/t8103-j457.dts @@ -21,6 +21,14 @@ }; }; +/* + * Adjust pcie0's iommu-map to account for the disabled port01.
+ */ +&pcie0 { + iommu-map = <0x100 &pcie0_dart_0 1 1>, + <0x200 &pcie0_dart_2 1 1>; +}; + &bluetooth0 { brcm,board-type = "apple,santorini"; }; @@ -36,10 +44,10 @@ */ &port02 { - bus-range = <3 3>; + bus-range = <2 2>; status = "okay"; ethernet0: ethernet@0,0 { - reg = <0x30000 0x0 0x0 0x0 0x0>; + reg = <0x20000 0x0 0x0 0x0 0x0>; /* To be filled by the loader */ local-mac-address = [00 10 18 00 00 00]; }; diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi index 447bfa060918c..209f99b1ceae7 100644 --- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi +++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi @@ -263,6 +263,9 @@ <0x7fffc000 0x2000>, <0x7fffe000 0x2000>; interrupt-controller; + #address-cells = <0>; + interrupts = ; #interrupt-cells = <3>; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi index 40e847bc0b7f8..62cf525ab714b 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi @@ -283,7 +283,7 @@ cpu-thermal { polling-delay-passive = <250>; polling-delay = <2000>; - thermal-sensors = <&tmu 0>; + thermal-sensors = <&tmu 1>; trips { cpu_alert0: trip0 { temperature = <85000>; @@ -313,7 +313,7 @@ soc-thermal { polling-delay-passive = <250>; polling-delay = <2000>; - thermal-sensors = <&tmu 1>; + thermal-sensors = <&tmu 0>; trips { soc_alert0: trip0 { temperature = <85000>; diff --git a/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts index 89e97c604bd3e..c3d2ddd887fdf 100644 --- a/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts +++ b/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts @@ -33,7 +33,9 @@ reg_vcc_panel: regulator-vcc-panel { compatible = "regulator-fixed"; - gpio = <&gpio4 3 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_vcc_panel>; + gpio = <&gpio2 21 GPIO_ACTIVE_HIGH>; enable-active-high; regulator-max-microvolt = <3300000>; regulator-min-microvolt = <3300000>; @@ -135,6 +137,16 @@ }; &usbotg1 { + adp-disable; + hnp-disable; + srp-disable; + disable-over-current; + dr_mode = "otg"; + usb-role-switch; + status = "okay"; +}; + +&usbotg2 { #address-cells = <1>; #size-cells = <0>; disable-over-current; @@ -147,17 +159,15 @@ }; }; -&usbotg2 { - adp-disable; - hnp-disable; - srp-disable; - disable-over-current; - dr_mode = "otg"; - usb-role-switch; - status = "okay"; -}; - &usdhc2 { vmmc-supply = <®_vdd_3v3>; status = "okay"; }; + +&iomuxc { + pinctrl_reg_vcc_panel: regvccpanelgrp { + fsl,pins = < + MX93_PAD_GPIO_IO21__GPIO2_IO21 0x31e /* PWM_2 */ + >; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi index 7365d6538a733..ddbc94c375e0c 100644 --- a/arch/arm64/boot/dts/freescale/imx95.dtsi +++ b/arch/arm64/boot/dts/freescale/imx95.dtsi @@ -822,7 +822,7 @@ interrupts = ; clocks = <&scmi_clk IMX95_CLK_LPUART7>; clock-names = "ipg"; - dmas = <&edma2 26 0 FSL_EDMA_RX>, <&edma2 25 0 0>; + dmas = <&edma2 88 0 FSL_EDMA_RX>, <&edma2 87 0 0>; dma-names = "rx", "tx"; status = "disabled"; }; @@ -834,7 +834,7 @@ interrupts = ; clocks = <&scmi_clk IMX95_CLK_LPUART8>; clock-names = "ipg"; - dmas = <&edma2 28 0 FSL_EDMA_RX>, <&edma2 27 0 0>; + dmas = <&edma2 90 0 FSL_EDMA_RX>, <&edma2 89 0 0>; dma-names = "rx", "tx"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts b/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts index 0f53745a6fa0d..41166f865f87c 100644 
--- a/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts +++ b/arch/arm64/boot/dts/marvell/cn9132-clearfog.dts @@ -413,7 +413,13 @@ /* SRDS #0,#1,#2,#3 - PCIe */ &cp0_pcie0 { num-lanes = <4>; - phys = <&cp0_comphy0 0>, <&cp0_comphy1 0>, <&cp0_comphy2 0>, <&cp0_comphy3 0>; + /* + * The mvebu-comphy driver does not currently know how to pass correct + * lane-count to ATF while configuring the serdes lanes. + * Rely on bootloader configuration only. + * + * phys = <&cp0_comphy0 0>, <&cp0_comphy1 0>, <&cp0_comphy2 0>, <&cp0_comphy3 0>; + */ status = "okay"; }; @@ -475,7 +481,13 @@ /* SRDS #0,#1 - PCIe */ &cp1_pcie0 { num-lanes = <2>; - phys = <&cp1_comphy0 0>, <&cp1_comphy1 0>; + /* + * The mvebu-comphy driver does not currently know how to pass correct + * lane-count to ATF while configuring the serdes lanes. + * Rely on bootloader configuration only. + * + * phys = <&cp1_comphy0 0>, <&cp1_comphy1 0>; + */ status = "okay"; }; diff --git a/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi b/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi index afc041c1c448c..bb2bb47fd77c1 100644 --- a/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi +++ b/arch/arm64/boot/dts/marvell/cn9132-sr-cex7.dtsi @@ -137,6 +137,14 @@ pinctrl-0 = <&ap_mmc0_pins>; pinctrl-names = "default"; vqmmc-supply = <&v_1_8>; + /* + * Not stable in HS modes - phy needs "more calibration", so disable + * UHS (by preventing voltage switch), SDR104, SDR50 and DDR50 modes. + */ + no-1-8-v; + no-sd; + no-sdio; + non-removable; status = "okay"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt6331.dtsi b/arch/arm64/boot/dts/mediatek/mt6331.dtsi index d89858c73ab1b..243afbffa21fd 100644 --- a/arch/arm64/boot/dts/mediatek/mt6331.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6331.dtsi @@ -6,12 +6,12 @@ #include &pwrap { - pmic: mt6331 { + pmic: pmic { compatible = "mediatek,mt6331"; interrupt-controller; #interrupt-cells = <2>; - mt6331regulator: mt6331regulator { + mt6331regulator: regulators { compatible = "mediatek,mt6331-regulator"; mt6331_vdvfs11_reg: buck-vdvfs11 { @@ -258,7 +258,7 @@ }; mt6331_vdig18_reg: ldo-vdig18 { - regulator-name = "dvdd18_dig"; + regulator-name = "vdig18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-ramp-delay = <0>; @@ -266,11 +266,11 @@ }; }; - mt6331rtc: mt6331rtc { + mt6331rtc: rtc { compatible = "mediatek,mt6331-rtc"; }; - mt6331keys: mt6331keys { + mt6331keys: keys { compatible = "mediatek,mt6331-keys"; power { linux,keycodes = ; diff --git a/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts b/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts index 91de920c22457..03cc48321a3f4 100644 --- a/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts +++ b/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts @@ -212,7 +212,7 @@ &mmc0 { /* eMMC controller */ - mediatek,latch-ck = <0x14>; /* hs400 */ + mediatek,latch-ck = <4>; /* hs400 */ mediatek,hs200-cmd-int-delay = <1>; mediatek,hs400-cmd-int-delay = <1>; mediatek,hs400-ds-dly3 = <0x1a>; diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi index 7c971198fa956..72a2a2bff0a93 100644 --- a/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi @@ -71,14 +71,14 @@ i2c-scl-internal-delay-ns = <10000>; touchscreen: touchscreen@10 { - compatible = "hid-over-i2c"; + compatible = "elan,ekth6915"; reg = <0x10>; interrupts-extended = <&pio 12 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 
= <&touchscreen_pins>; - post-power-on-delay-ms = <10>; - hid-descr-addr = <0x0001>; - vdd-supply = <&pp3300_s3>; + reset-gpios = <&pio 60 GPIO_ACTIVE_LOW>; + vcc33-supply = <&pp3300_s3>; + no-reset-on-power-off; }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts index 26d3451a5e47c..24d9ede63eaa2 100644 --- a/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts +++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts @@ -42,3 +42,7 @@ CROS_STD_MAIN_KEYMAP >; }; + +&touchscreen { + compatible = "elan,ekth6a12nay"; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi index 2e138b54f5563..451aa278bef50 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi @@ -1563,9 +1563,6 @@ power-domains = <&spm MT8195_POWER_DOMAIN_PCIE_MAC_P0>; - resets = <&infracfg_ao MT8195_INFRA_RST2_PCIE_P0_SWRST>; - reset-names = "mac"; - #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pcie_intc0 0>, diff --git a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts index e2e75b8ff9188..9ab4fee769e40 100644 --- a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts +++ b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts @@ -351,7 +351,7 @@ LDO_VIN2-supply = <&vsys>; LDO_VIN3-supply = <&vsys>; - mt6360_buck1: BUCK1 { + mt6360_buck1: buck1 { regulator-name = "emi_vdd2"; regulator-min-microvolt = <600000>; regulator-max-microvolt = <1800000>; @@ -361,7 +361,7 @@ regulator-always-on; }; - mt6360_buck2: BUCK2 { + mt6360_buck2: buck2 { regulator-name = "emi_vddq"; regulator-min-microvolt = <300000>; regulator-max-microvolt = <1300000>; @@ -371,7 +371,7 @@ regulator-always-on; }; - mt6360_ldo1: LDO1 { + mt6360_ldo1: ldo1 { regulator-name = "mt6360_ldo1"; /* Test point */ regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3600000>; @@ -379,7 +379,7 @@ MT6360_OPMODE_LP>; }; - mt6360_ldo2: LDO2 { + mt6360_ldo2: ldo2 { regulator-name = "panel1_p1v8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -387,7 +387,7 @@ MT6360_OPMODE_LP>; }; - mt6360_ldo3: LDO3 { + mt6360_ldo3: ldo3 { regulator-name = "vmc_pmu"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; @@ -395,7 +395,7 @@ MT6360_OPMODE_LP>; }; - mt6360_ldo5: LDO5 { + mt6360_ldo5: ldo5 { regulator-name = "vmch_pmu"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; @@ -403,7 +403,7 @@ MT6360_OPMODE_LP>; }; - mt6360_ldo6: LDO6 { + mt6360_ldo6: ldo6 { regulator-name = "mt6360_ldo6"; /* Test point */ regulator-min-microvolt = <500000>; regulator-max-microvolt = <2100000>; @@ -411,7 +411,7 @@ MT6360_OPMODE_LP>; }; - mt6360_ldo7: LDO7 { + mt6360_ldo7: ldo7 { regulator-name = "emi_vmddr_en"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; diff --git a/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts index cce642c538128..3d3db33a64dc6 100644 --- a/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts +++ b/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts @@ -11,7 +11,7 @@ / { model = "Pumpkin MT8516"; - compatible = "mediatek,mt8516"; + compatible = "mediatek,mt8516-pumpkin", "mediatek,mt8516"; memory@40000000 { device_type = "memory"; diff --git 
a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 800bfe83dbf83..b0f1c1b2e6f30 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -1540,6 +1540,8 @@ interrupts = ; + resets = <&gcc GCC_MDSS_BCR>; + interrupt-controller; #interrupt-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi index effa3aaeb2505..39493f75c0dce 100644 --- a/arch/arm64/boot/dts/qcom/msm8939.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi @@ -1218,6 +1218,8 @@ power-domains = <&gcc MDSS_GDSC>; + resets = <&gcc GCC_MDSS_BCR>; + #address-cells = <1>; #size-cells = <1>; #interrupt-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi index 2cfdf5bd5fd9b..e75e6354b2d52 100644 --- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi +++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi @@ -1405,6 +1405,7 @@ snps,has-lpm-erratum; snps,hird-threshold = /bits/ 8 <0x10>; snps,usb3_lpm_capable; + snps,parkmode-disable-ss-quirk; maximum-speed = "super-speed"; dr_mode = "otg"; usb-role-switch; diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 9bf7a405a964c..54956ae1f6745 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -5365,11 +5365,11 @@ compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0"; qcom,controlled-remotely; reg = <0 0x17184000 0 0x2a000>; - num-channels = <31>; + num-channels = <23>; interrupts = ; #dma-cells = <1>; qcom,ee = <1>; - qcom,num-ees = <2>; + qcom,num-ees = <4>; iommus = <&apps_smmu 0x1806 0x0>; }; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi index 5b54ee79f048e..9986a10ae0068 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi +++ b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi @@ -475,6 +475,8 @@ #address-cells = <1>; #size-cells = <0>; + status = "disabled"; + pm8010_temp_alarm: temp-alarm@2400 { compatible = "qcom,spmi-temp-alarm"; reg = <0x2400>; diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi index 377849cbb462e..5785a934c28bf 100644 --- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi @@ -48,7 +48,10 @@ #if (SW_SCIF_CAN || SW_RSPI_CAN) &canfd { pinctrl-0 = <&can1_pins>; - /delete-node/ channel@0; + + channel0 { + status = "disabled"; + }; }; #else &canfd { diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi index 45d68a0d1b593..c4857bfbeb14e 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi @@ -258,7 +258,7 @@ main_pmx0: pinctrl@f4000 { compatible = "pinctrl-single"; - reg = <0x00 0xf4000 0x00 0x2ac>; + reg = <0x00 0xf4000 0x00 0x25c>; #pinctrl-cells = <1>; pinctrl-single,register-width = <32>; pinctrl-single,function-mask = <0xffffffff>; diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index d92a0203e5a93..c279a0a9b3660 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -93,6 +93,7 @@ #define ARM_CPU_PART_NEOVERSE_V2 0xD4F #define ARM_CPU_PART_CORTEX_A720 0xD81 #define ARM_CPU_PART_CORTEX_X4 0xD82 +#define ARM_CPU_PART_NEOVERSE_V3AE 0xD83 #define ARM_CPU_PART_NEOVERSE_V3 0xD84 #define ARM_CPU_PART_CORTEX_X925 0xD85 #define ARM_CPU_PART_CORTEX_A725 0xD87 @@ -180,6 +181,7 @@ #define MIDR_NEOVERSE_V2 
MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2) #define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720) #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4) +#define MIDR_NEOVERSE_V3AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3AE) #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3) #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925) #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 5ba8376735cb0..eb57ddb5ecc53 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -212,7 +212,8 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) static inline pte_t pte_mkwrite_novma(pte_t pte) { pte = set_pte_bit(pte, __pgprot(PTE_WRITE)); - pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); + if (pte_sw_dirty(pte)) + pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); return pte; } diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index b21dd24b8efc3..020e01181a0f1 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -6,6 +6,7 @@ * 2001-2002 Keith Owens * Copyright (C) 2012 ARM Ltd. */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index a78f247029aec..3f675ae57d09a 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -455,6 +455,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = { MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3AE), {} }; #endif diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 9ca5ffd8d817f..5e68d65e675e5 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2279,17 +2279,21 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused) #ifdef CONFIG_ARM64_MTE static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) { + static bool cleared_zero_page = false; + sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0); mte_cpu_setup(); /* * Clear the tags in the zero page. This needs to be done via the - * linear map which has the Tagged attribute. + * linear map which has the Tagged attribute. Since this page is + * always mapped as pte_special(), set_pte_at() will not attempt to + * clear the tags or set PG_mte_tagged. 
*/ - if (try_page_mte_tagging(ZERO_PAGE(0))) { + if (!cleared_zero_page) { + cleared_zero_page = true; mte_clear_page_tags(lm_alias(empty_zero_page)); - set_page_mte_tagged(ZERO_PAGE(0)); } kasan_init_hw_tags_cpu(); diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 6174671be7c18..5d63ca9667370 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -428,7 +428,8 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, put_page(page); break; } - WARN_ON_ONCE(!page_mte_tagged(page)); + + WARN_ON_ONCE(!page_mte_tagged(page) && !is_zero_page(page)); /* limit access to the end of the page */ offset = offset_in_page(addr); diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c index e57b043f324b5..adec61b8b278a 100644 --- a/arch/arm64/kernel/pi/map_kernel.c +++ b/arch/arm64/kernel/pi/map_kernel.c @@ -78,6 +78,12 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level) twopass |= enable_scs; prot = twopass ? data_prot : text_prot; + /* + * [_text, _stext) isn't executed after boot and contains some + * non-executable, unpredictable data, so map it non-executable. + */ + map_segment(init_pg_dir, &pgdp, va_offset, _text, _stext, data_prot, + false, root_level); map_segment(init_pg_dir, &pgdp, va_offset, _stext, _etext, prot, !twopass, root_level); map_segment(init_pg_dir, &pgdp, va_offset, __start_rodata, diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 4268678d0e86c..6e397d8dcd4c2 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) "kprobes: " fmt +#include #include #include #include @@ -41,6 +42,17 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static void __kprobes post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *); +void *alloc_insn_page(void) +{ + void *addr; + + addr = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE); + if (!addr) + return NULL; + set_memory_rox((unsigned long)addr, 1); + return addr; +} + static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { kprobe_opcode_t *addr = p->ainsn.api.insn; diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 87f61fd6783c2..8185979f0f116 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -213,7 +213,7 @@ static void __init request_standard_resources(void) unsigned long i = 0; size_t res_size; - kernel_code.start = __pa_symbol(_stext); + kernel_code.start = __pa_symbol(_text); kernel_code.end = __pa_symbol(__init_begin - 1); kernel_data.start = __pa_symbol(_sdata); kernel_data.end = __pa_symbol(_end - 1); @@ -281,7 +281,7 @@ u64 cpu_logical_map(unsigned int cpu) void __init __no_sanitize_address setup_arch(char **cmdline_p) { - setup_initial_init_mm(_stext, _etext, _edata, _end); + setup_initial_init_mm(_text, _etext, _edata, _end); *cmdline_p = boot_command_line; diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index a7bb20055ce09..9e734d6314e03 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -25,8 +25,13 @@ void copy_highpage(struct page *to, struct page *from) page_kasan_tag_reset(to); if (system_supports_mte() && page_mte_tagged(from)) { - /* It's a new page, shouldn't have been tagged yet */ - WARN_ON_ONCE(!try_page_mte_tagging(to)); + /* + * Most of the time it's a new page that shouldn't have been + * tagged yet. However, folio migration can end up reusing the + * same page without untagging it.
Ignore the warning if the + * page is already tagged. + */ + try_page_mte_tagging(to); mte_copy_page_tags(kto, kfrom); set_page_mte_tagged(to); } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 93ba66de160ce..c59aa6df14db7 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -300,7 +300,7 @@ void __init arm64_memblock_init(void) * Register the kernel text, kernel data, initrd, and initial * pagetables with memblock. */ - memblock_reserve(__pa_symbol(_stext), _end - _stext); + memblock_reserve(__pa_symbol(_text), _end - _text); if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) { /* the generic initrd code expects virtual addresses */ initrd_start = __phys_to_virt(phys_initrd_start); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index aed8d32979d9c..ea80e271301ed 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -561,8 +561,8 @@ void __init mark_linear_text_alias_ro(void) /* * Remove the write permissions from the linear alias of .text/.rodata */ - update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext), - (unsigned long)__init_begin - (unsigned long)_stext, + update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text), + (unsigned long)__init_begin - (unsigned long)_text, PAGE_KERNEL_RO); } @@ -623,7 +623,7 @@ static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { static void __init map_mem(pgd_t *pgdp) { static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN); - phys_addr_t kernel_start = __pa_symbol(_stext); + phys_addr_t kernel_start = __pa_symbol(_text); phys_addr_t kernel_end = __pa_symbol(__init_begin); phys_addr_t start, end; phys_addr_t early_kfence_pool; @@ -670,7 +670,7 @@ static void __init map_mem(pgd_t *pgdp) } /* - * Map the linear alias of the [_stext, __init_begin) interval + * Map the linear alias of the [_text, __init_begin) interval * as non-executable now, and remove the write permission in * mark_linear_text_alias_ro() below (which will be called after * alternative patching has completed). This makes the contents @@ -697,6 +697,10 @@ void mark_rodata_ro(void) WRITE_ONCE(rodata_is_rw, false); update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata, section_size, PAGE_KERNEL_RO); + /* mark the range between _text and _stext as read only. */ + update_mapping_prot(__pa_symbol(_text), (unsigned long)_text, + (unsigned long)_stext - (unsigned long)_text, + PAGE_KERNEL_RO); } static void __init declare_vma(struct vm_struct *vma, @@ -767,7 +771,7 @@ static void __init declare_kernel_vmas(void) { static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT]; - declare_vma(&vmlinux_seg[0], _stext, _etext, VM_NO_GUARD); + declare_vma(&vmlinux_seg[0], _text, _etext, VM_NO_GUARD); declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD); declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD); declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD); diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 5553508c36440..ca6d002a6f137 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -2754,8 +2754,7 @@ void bpf_jit_free(struct bpf_prog *prog) * before freeing it. 
*/ if (jit_data) { - bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size, - sizeof(jit_data->header->size)); + bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header); kfree(jit_data); } hdr = bpf_jit_binary_pack_hdr(prog); diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 362bcfa0aed18..5127d3d3b8677 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1213,7 +1213,7 @@ UnsignedEnum 43:40 TraceFilt 0b0000 NI 0b0001 IMP EndEnum -UnsignedEnum 39:36 DoubleLock +SignedEnum 39:36 DoubleLock 0b0000 IMP 0b1111 NI EndEnum @@ -1861,7 +1861,7 @@ UnsignedEnum 11:8 ASID2 0b0000 NI 0b0001 IMP EndEnum -SignedEnum 7:4 EIESB +UnsignedEnum 7:4 EIESB 0b0000 NI 0b0001 ToEL3 0b0010 ToELx diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c index d1e9035794733..5525c8e7e1d9e 100644 --- a/arch/csky/kernel/asm-offsets.c +++ b/arch/csky/kernel/asm-offsets.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. +#define COMPILE_OFFSETS #include #include diff --git a/arch/hexagon/kernel/asm-offsets.c b/arch/hexagon/kernel/asm-offsets.c index 03a7063f94561..50eea9fa6f137 100644 --- a/arch/hexagon/kernel/asm-offsets.c +++ b/arch/hexagon/kernel/asm-offsets.c @@ -8,6 +8,7 @@ * * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 567bd122a9ee4..c14ae21075c5f 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -114,7 +114,7 @@ KBUILD_RUSTFLAGS_KERNEL += -Crelocation-model=pie LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs) endif -cflags-y += $(call cc-option, -mno-check-zero-division) +cflags-y += $(call cc-option, -mno-check-zero-division -fno-isolate-erroneous-paths-dereference) ifndef CONFIG_KASAN cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index bee9f7a3108f0..d20d71d4bcae6 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -4,6 +4,8 @@ * * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#define COMPILE_OFFSETS + #include #include #include diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index 50c469067f3aa..b5e2312a2fca5 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -166,6 +166,10 @@ static inline __init bool kaslr_disabled(void) return true; #endif + str = strstr(boot_command_line, "kexec_file"); + if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) + return true; + return false; } diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 1fa6a604734ef..2ceb198ae8c80 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -354,6 +354,7 @@ void __init platform_init(void) #ifdef CONFIG_ACPI acpi_table_upgrade(); + acpi_gbl_use_global_lock = false; acpi_gbl_use_default_register_widths = false; acpi_boot_table_init(); #endif diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index 14c64a6f12176..50ec92651d5a5 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -350,12 +350,12 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask, #include #else -static inline int 
find_first_zero_bit(const unsigned long *vaddr, - unsigned size) +static inline unsigned long find_first_zero_bit(const unsigned long *vaddr, + unsigned long size) { const unsigned long *p = vaddr; - int res = 32; - unsigned int words; + unsigned long res = 32; + unsigned long words; unsigned long num; if (!size) @@ -376,8 +376,9 @@ static inline int find_first_zero_bit(const unsigned long *vaddr, } #define find_first_zero_bit find_first_zero_bit -static inline int find_next_zero_bit(const unsigned long *vaddr, int size, - int offset) +static inline unsigned long find_next_zero_bit(const unsigned long *vaddr, + unsigned long size, + unsigned long offset) { const unsigned long *p = vaddr + (offset >> 5); int bit = offset & 31UL, res; @@ -406,11 +407,12 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size, } #define find_next_zero_bit find_next_zero_bit -static inline int find_first_bit(const unsigned long *vaddr, unsigned size) +static inline unsigned long find_first_bit(const unsigned long *vaddr, + unsigned long size) { const unsigned long *p = vaddr; - int res = 32; - unsigned int words; + unsigned long res = 32; + unsigned long words; unsigned long num; if (!size) @@ -431,8 +433,9 @@ static inline int find_first_bit(const unsigned long *vaddr, unsigned size) } #define find_first_bit find_first_bit -static inline int find_next_bit(const unsigned long *vaddr, int size, - int offset) +static inline unsigned long find_next_bit(const unsigned long *vaddr, + unsigned long size, + unsigned long offset) { const unsigned long *p = vaddr + (offset >> 5); int bit = offset & 31UL, res; diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c index 906d732305374..67a1990f9d748 100644 --- a/arch/m68k/kernel/asm-offsets.c +++ b/arch/m68k/kernel/asm-offsets.c @@ -9,6 +9,7 @@ * #defines from the assembly-language output. */ +#define COMPILE_OFFSETS #define ASM_OFFSETS_C #include diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c index 104c3ac5f30c8..b4b67d58e7f6a 100644 --- a/arch/microblaze/kernel/asm-offsets.c +++ b/arch/microblaze/kernel/asm-offsets.c @@ -7,6 +7,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index cb1045ebab062..22c99a2cd5707 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -9,6 +9,8 @@ * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000 MIPS Technologies, Inc. 
*/ +#define COMPILE_OFFSETS + #include #include #include diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index 3a2836e9d8566..2a3fd8bbf6c2c 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c @@ -47,7 +47,7 @@ static struct resource standard_io_resources[] = { .name = "keyboard", .start = 0x60, .end = 0x6f, - .flags = IORESOURCE_IO | IORESOURCE_BUSY + .flags = IORESOURCE_IO }, { .name = "dma page reg", diff --git a/arch/nios2/kernel/asm-offsets.c b/arch/nios2/kernel/asm-offsets.c index e3d9b7b6fb48a..88190b503ce5d 100644 --- a/arch/nios2/kernel/asm-offsets.c +++ b/arch/nios2/kernel/asm-offsets.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2011 Tobias Klauser */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c index da122a5fa43b2..8528ab1f222cd 100644 --- a/arch/nios2/kernel/setup.c +++ b/arch/nios2/kernel/setup.c @@ -142,6 +142,20 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low, *max_high = PFN_DOWN(memblock_end_of_DRAM()); } +static void __init adjust_lowmem_bounds(void) +{ + phys_addr_t block_start, block_end; + u64 i; + phys_addr_t memblock_limit = 0; + + for_each_mem_range(i, &block_start, &block_end) { + if (block_end > memblock_limit) + memblock_limit = block_end; + } + + memblock_set_current_limit(memblock_limit); +} + void __init setup_arch(char **cmdline_p) { console_verbose(); @@ -155,6 +169,7 @@ void __init setup_arch(char **cmdline_p) /* Keep a copy of command line */ *cmdline_p = boot_command_line; + adjust_lowmem_bounds(); find_limits(&min_low_pfn, &max_low_pfn, &max_pfn); max_mapnr = max_low_pfn; diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c index 710651d5aaae1..3cc826f2216b1 100644 --- a/arch/openrisc/kernel/asm-offsets.c +++ b/arch/openrisc/kernel/asm-offsets.c @@ -18,6 +18,7 @@ * compile this file to assembler, and then extract the * #defines from the assembly-language output. 
*/ +#define COMPILE_OFFSETS #include #include diff --git a/arch/parisc/include/uapi/asm/ioctls.h b/arch/parisc/include/uapi/asm/ioctls.h index 82d1148c6379a..74b4027a4e808 100644 --- a/arch/parisc/include/uapi/asm/ioctls.h +++ b/arch/parisc/include/uapi/asm/ioctls.h @@ -10,10 +10,10 @@ #define TCSETS _IOW('T', 17, struct termios) /* TCSETATTR */ #define TCSETSW _IOW('T', 18, struct termios) /* TCSETATTRD */ #define TCSETSF _IOW('T', 19, struct termios) /* TCSETATTRF */ -#define TCGETA _IOR('T', 1, struct termio) -#define TCSETA _IOW('T', 2, struct termio) -#define TCSETAW _IOW('T', 3, struct termio) -#define TCSETAF _IOW('T', 4, struct termio) +#define TCGETA 0x40125401 +#define TCSETA 0x80125402 +#define TCSETAW 0x80125403 +#define TCSETAF 0x80125404 #define TCSBRK _IO('T', 5) #define TCXONC _IO('T', 6) #define TCFLSH _IO('T', 7) diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 757816a7bd4b2..9abfe65492c65 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -13,6 +13,7 @@ * Copyright (C) 2002 Randolph Chung * Copyright (C) 2003 James Bottomley */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index 69d65ffab3126..03165c82dfdbd 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -41,7 +41,6 @@ unsigned long raw_copy_from_user(void *dst, const void __user *src, mtsp(get_kernel_space(), SR_TEMP2); /* Check region is user accessible */ - if (start) while (start < end) { if (!prober_user(SR_TEMP1, start)) { newlen = (start - (unsigned long) src); diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h index dd4eb30631758..f4390704d5ba2 100644 --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h @@ -7,8 +7,14 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), - pgtable_gfp_flags(mm, GFP_KERNEL)); + pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), + pgtable_gfp_flags(mm, GFP_KERNEL)); + +#ifdef CONFIG_PPC_BOOK3S_603 + memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, + (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); +#endif + return pgd; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h index bb5f3e8ea912d..4ef780b291bc3 100644 --- a/arch/powerpc/include/asm/nohash/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/pgalloc.h @@ -22,7 +22,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), pgtable_gfp_flags(mm, GFP_KERNEL)); -#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603) +#ifdef CONFIG_PPC_8xx memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); #endif diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 2f72ad885332e..2ec5431ce1be2 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -20,18 +20,6 @@ struct mm_struct; #include #endif /* !CONFIG_PPC_BOOK3S */ -/* - * Protection used for kernel text. We want the debuggers to be able to - * set breakpoints anywhere, so don't write protect the kernel text - * on platforms where such control is possible. 
- */ -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \ - defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) -#define PAGE_KERNEL_TEXT PAGE_KERNEL_X -#else -#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX -#endif - /* Make modules code happy. We don't set RO yet */ #define PAGE_KERNEL_EXEC PAGE_KERNEL_X diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 131a8cc10dbe8..cbeeda45c00a2 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -8,6 +8,7 @@ * compile this file to assembler, and then extract the * #defines from the assembly-language output. */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 56c5ebe21b99a..613606400ee99 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -162,7 +162,7 @@ instruction_counter: * For the MPC8xx, this is a software tablewalk to load the instruction * TLB. The task switch loads the M_TWB register with the pointer to the first * level table. - * If we discover there is no second level table (value is zero) or if there + * If there is no second level table (value is zero) or if there * is an invalid pte, we load that into the TLB, which causes another fault * into the TLB Error interrupt where we can handle such problems. * We have to use the MD_xxx registers for the tablewalk because the @@ -183,9 +183,6 @@ instruction_counter: mtspr SPRN_SPRG_SCRATCH2, r10 mtspr SPRN_M_TW, r11 - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ mfspr r10, SPRN_SRR0 /* Get effective address of fault */ INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11) mtspr SPRN_MD_EPN, r10 @@ -228,10 +225,6 @@ instruction_counter: mtspr SPRN_SPRG_SCRATCH2, r10 mtspr SPRN_M_TW, r11 - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - mfspr r10, SPRN_MD_EPN mfspr r10, SPRN_M_TWB /* Get level 1 table */ lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 2db167f4233f7..507e2ef50bd79 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -204,7 +204,7 @@ int mmu_mark_initmem_nx(void) for (i = 0; i < nb - 1 && base < top;) { size = bat_block_size(base, top); - setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); + setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X); base += size; } if (base < top) { @@ -215,7 +215,7 @@ int mmu_mark_initmem_nx(void) pr_warn("Some RW data is getting mapped X. " "Adjust CONFIG_DATA_SHIFT to avoid that.\n"); } - setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); + setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X); base += size; } for (; i < nb; i++) diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 787b222063866..e52d036c7a831 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -109,7 +109,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) p = memstart_addr + s; for (; s < top; s += PAGE_SIZE) { ktext = core_kernel_text(v); - map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL); + map_kernel_page(v, p, ktext ? 
PAGE_KERNEL_X : PAGE_KERNEL); v += PAGE_SIZE; p += PAGE_SIZE; } diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index b0a14e48175c6..4d938c1bfdd38 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1897,7 +1897,7 @@ static int pnv_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, return 0; out: - irq_domain_free_irqs_parent(domain, virq, i - 1); + irq_domain_free_irqs_parent(domain, virq, i); msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, nr_irqs); return ret; } diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index ba98a680a12e6..169859df1aa91 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -592,7 +592,7 @@ static int pseries_irq_domain_alloc(struct irq_domain *domain, unsigned int virq out: /* TODO: handle RTAS cleanup in ->msi_finish() ? */ - irq_domain_free_irqs_parent(domain, virq, i - 1); + irq_domain_free_irqs_parent(domain, virq, i); return ret; } diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 03881122506a7..87c7d94c71f13 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -655,6 +655,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) return __pgprot(prot); } +#define pgprot_dmacoherent pgprot_writecombine + /* * THP functions */ diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index c2f3129a8e5cf..05c6152a65310 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -3,6 +3,7 @@ * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2017 SiFive */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index f6b13e9f5e6cb..3dbc8cc557dd1 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -62,10 +62,8 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo return -ENODEV; } - if (!of_device_is_available(node)) { - pr_info("CPU with hartid=%lu is not available\n", *hart); + if (!of_device_is_available(node)) return -ENODEV; - } if (of_property_read_string(node, "riscv,isa-base", &isa)) goto old_interface; diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index d2dacea1aedd9..0daba93c1a81e 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -49,10 +49,15 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) post_kprobe_handler(p, kcb, regs); } -static bool __kprobes arch_check_kprobe(struct kprobe *p) +static bool __kprobes arch_check_kprobe(unsigned long addr) { - unsigned long tmp = (unsigned long)p->addr - p->offset; - unsigned long addr = (unsigned long)p->addr; + unsigned long tmp, offset; + + /* start iterating at the closest preceding symbol */ + if (!kallsyms_lookup_size_offset(addr, NULL, &offset)) + return false; + + tmp = addr - offset; while (tmp <= addr) { if (tmp == addr) @@ -71,7 +76,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) if ((unsigned long)insn & 0x1) return -EILSEQ; - if (!arch_check_kprobe(p)) + if (!arch_check_kprobe((unsigned long)p->addr)) return -EILSEQ; /* copy instruction */ diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index cea0ca2bf2a25..fc62548888c58 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ 
-25,6 +25,11 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair, bool first = true; int cpu; + if (pair->key != RISCV_HWPROBE_KEY_MVENDORID && + pair->key != RISCV_HWPROBE_KEY_MIMPID && + pair->key != RISCV_HWPROBE_KEY_MARCHID) + goto out; + for_each_cpu(cpu, cpus) { u64 cpu_id; @@ -55,6 +60,7 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair, } } +out: pair->value = id; } diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 563425b4963c9..497945aa3e2c4 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -559,6 +559,39 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, } } +/* + * Sign-extend the register if necessary + */ +static int sign_extend(u8 rd, u8 rs, u8 sz, bool sign, struct rv_jit_context *ctx) +{ + if (!sign && (sz == 1 || sz == 2)) { + if (rd != rs) + emit_mv(rd, rs, ctx); + return 0; + } + + switch (sz) { + case 1: + emit_sextb(rd, rs, ctx); + break; + case 2: + emit_sexth(rd, rs, ctx); + break; + case 4: + emit_sextw(rd, rs, ctx); + break; + case 8: + if (rd != rs) + emit_mv(rd, rs, ctx); + break; + default: + pr_err("bpf-jit: invalid size %d for sign_extend\n", sz); + return -EINVAL; + } + + return 0; +} + #define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0) #define BPF_FIXUP_REG_MASK GENMASK(31, 27) #define REG_DONT_CLEAR_MARKER 0 /* RV_REG_ZERO unused in pt_regmap */ @@ -1020,8 +1053,15 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx); if (save_ret) { - emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx); emit_ld(regmap[BPF_REG_0], -(retval_off - 8), RV_REG_FP, ctx); + if (is_struct_ops) { + ret = sign_extend(RV_REG_A0, regmap[BPF_REG_0], m->ret_size, + m->ret_flags & BTF_FMODEL_SIGNED_ARG, ctx); + if (ret) + goto out; + } else { + emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx); + } } emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx); diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 5b97af3117092..f949f7e1000cf 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -25,6 +25,7 @@ endif KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11 KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR +KBUILD_CFLAGS_DECOMPRESSOR += -Wno-pointer-sign KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 5529248d84fb8..3cfc4939033c9 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -4,6 +4,7 @@ * This code generates raw asm output which is post-processed to extract * and format the required data. */ +#define COMPILE_OFFSETS #define ASM_OFFSETS_C diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index ff1ddba96352a..0d18d3267bc4f 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -202,6 +202,33 @@ SECTIONS . = ALIGN(PAGE_SIZE); _end = . ; + /* Debugging sections. */ + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + /* + * Make sure that the .got.plt is either completely empty or it + * contains only the three reserved double words. 
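+ * (These are the slots the ELF ABI sets aside for the dynamic linker; + * 3 * 8 = 0x18 bytes, matching the ASSERT below.)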
+ */ + .got.plt : { + *(.got.plt) + } + ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18, "Unexpected GOT/PLT entries detected!") + + /* + * Sections that should stay zero sized, which is safer to + * explicitly check instead of blindly discarding. + */ + .plt : { + *(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt) + } + ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") + .rela.dyn : { + *(.rela.*) *(.rela_*) + } + ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!") + /* * uncompressed image info used by the decompressor * it should match struct vmlinux_info @@ -232,33 +259,6 @@ SECTIONS #endif } :NONE - /* Debugging sections. */ - STABS_DEBUG - DWARF_DEBUG - ELF_DETAILS - - /* - * Make sure that the .got.plt is either completely empty or it - * contains only the three reserved double words. - */ - .got.plt : { - *(.got.plt) - } - ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18, "Unexpected GOT/PLT entries detected!") - - /* - * Sections that should stay zero sized, which is safer to - * explicitly check instead of blindly discarding. - */ - .plt : { - *(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt) - } - ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") - .rela.dyn : { - *(.rela.*) *(.rela_*) - } - ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!") - /* Sections to be discarded */ DISCARDS /DISCARD/ : { diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index f5dece9353535..a2ec82ec78ac9 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -43,9 +43,13 @@ __initcall(page_table_register_sysctl); unsigned long *crst_table_alloc(struct mm_struct *mm) { - struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER); + gfp_t gfp = GFP_KERNEL_ACCOUNT; + struct ptdesc *ptdesc; unsigned long *table; + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + ptdesc = pagetable_alloc(gfp, CRST_ALLOC_ORDER); if (!ptdesc) return NULL; table = ptdesc_to_virt(ptdesc); @@ -142,7 +146,7 @@ struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm) struct ptdesc *ptdesc; u64 *table; - ptdesc = pagetable_alloc(GFP_KERNEL, 0); + ptdesc = pagetable_alloc(GFP_KERNEL_ACCOUNT, 0); if (ptdesc) { table = (u64 *)ptdesc_to_virt(ptdesc); __arch_set_page_dat(table, 1); @@ -161,10 +165,13 @@ void page_table_free_pgste(struct ptdesc *ptdesc) unsigned long *page_table_alloc(struct mm_struct *mm) { + gfp_t gfp = GFP_KERNEL_ACCOUNT; struct ptdesc *ptdesc; unsigned long *table; - ptdesc = pagetable_alloc(GFP_KERNEL, 0); + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + ptdesc = pagetable_alloc(gfp, 0); if (!ptdesc) return NULL; if (!pagetable_pte_ctor(ptdesc)) { diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h deleted file mode 100644 index 7822ea92e54af..0000000000000 --- a/arch/s390/net/bpf_jit.h +++ /dev/null @@ -1,55 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * BPF Jit compiler defines - * - * Copyright IBM Corp. 
2012,2015 - * - * Author(s): Martin Schwidefsky - * Michael Holzheu - */ - -#ifndef __ARCH_S390_NET_BPF_JIT_H -#define __ARCH_S390_NET_BPF_JIT_H - -#ifndef __ASSEMBLY__ - -#include -#include - -#endif /* __ASSEMBLY__ */ - -/* - * Stackframe layout (packed stack): - * - * ^ high - * +---------------+ | - * | old backchain | | - * +---------------+ | - * | r15 - r6 | | - * +---------------+ | - * | 4 byte align | | - * | tail_call_cnt | | - * BFP -> +===============+ | - * | | | - * | BPF stack | | - * | | | - * R15+160 -> +---------------+ | - * | new backchain | | - * R15+152 -> +---------------+ | - * | + 152 byte SA | | - * R15 -> +---------------+ + low - * - * We get 160 bytes stack space from calling function, but only use - * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt. - * - * The stack size used by the BPF program ("BPF stack" above) is passed - * via "aux->stack_depth". - */ -#define STK_SPACE_ADD (160) -#define STK_160_UNUSED (160 - 12 * 8) -#define STK_OFF (STK_SPACE_ADD - STK_160_UNUSED) - -#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ -#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ - -#endif /* __ARCH_S390_NET_BPF_JIT_H */ diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index ead8d9ba9032c..f305cb42070df 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -32,7 +32,6 @@ #include #include #include -#include "bpf_jit.h" struct bpf_jit { u32 seen; /* Flags to remember seen eBPF instructions */ @@ -56,6 +55,7 @@ struct bpf_jit { int prologue_plt; /* Start of prologue hotpatch PLT */ int kern_arena; /* Pool offset of kernel arena address */ u64 user_arena; /* User arena address */ + u32 frame_off; /* Offset of struct bpf_prog from %r15 */ }; #define SEEN_MEM BIT(0) /* use mem[] for temporary storage */ @@ -403,12 +403,26 @@ static void jit_fill_hole(void *area, unsigned int size) memset(area, 0, size); } +/* + * Caller-allocated part of the frame. + * Thanks to packed stack, its otherwise unused initial part can be used for + * the BPF stack and for the next frame. + */ +struct prog_frame { + u64 unused[8]; + /* BPF stack starts here and grows towards 0 */ + u32 tail_call_cnt; + u32 pad; + u64 r6[10]; /* r6 - r15 */ + u64 backchain; +} __packed; + /* * Save registers from "rs" (register start) to "re" (register end) on stack */ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re) { - u32 off = STK_OFF_R6 + (rs - 6) * 8; + u32 off = offsetof(struct prog_frame, r6) + (rs - 6) * 8; if (rs == re) /* stg %rs,off(%r15) */ @@ -421,12 +435,9 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re) /* * Restore registers from "rs" (register start) to "re" (register end) on stack */ -static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth) +static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re) { - u32 off = STK_OFF_R6 + (rs - 6) * 8; - - if (jit->seen & SEEN_STACK) - off += STK_OFF + stack_depth; + u32 off = jit->frame_off + offsetof(struct prog_frame, r6) + (rs - 6) * 8; if (rs == re) /* lg %rs,off(%r15) */ @@ -470,8 +481,7 @@ static int get_end(u16 seen_regs, int start) * Save and restore clobbered registers (6-15) on stack. * We save/restore registers in chunks with gap >= 2 registers. 
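 * A chunk of adjacent registers can then be covered by a single * store-multiple/load-multiple instruction.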
*/ -static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth, - u16 extra_regs) +static void save_restore_regs(struct bpf_jit *jit, int op, u16 extra_regs) { u16 seen_regs = jit->seen_regs | extra_regs; const int last = 15, save_restore_size = 6; @@ -494,7 +504,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth, if (op == REGS_SAVE) save_regs(jit, rs, re); else - restore_regs(jit, rs, re, stack_depth); + restore_regs(jit, rs, re); re++; } while (re <= last); } @@ -559,11 +569,12 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target) * Emit function prologue * * Save registers and create stack frame if necessary. - * See stack frame layout description in "bpf_jit.h"! + * Stack frame layout is described by struct prog_frame. */ -static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, - u32 stack_depth) +static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp) { + BUILD_BUG_ON(sizeof(struct prog_frame) != STACK_FRAME_OVERHEAD); + /* No-op for hotpatching */ /* brcl 0,prologue_plt */ EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt); @@ -571,8 +582,9 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, if (!bpf_is_subprog(fp)) { /* Initialize the tail call counter in the main program. */ - /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */ - _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT); + /* xc tail_call_cnt(4,%r15),tail_call_cnt(%r15) */ + _EMIT6(0xd703f000 | offsetof(struct prog_frame, tail_call_cnt), + 0xf000 | offsetof(struct prog_frame, tail_call_cnt)); } else { /* * Skip the tail call counter initialization in subprograms. @@ -595,7 +607,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, jit->seen_regs |= NVREGS; } else { /* Save registers */ - save_restore_regs(jit, REGS_SAVE, stack_depth, + save_restore_regs(jit, REGS_SAVE, fp->aux->exception_boundary ? NVREGS : 0); } /* Setup literal pool */ @@ -615,13 +627,15 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) { /* lgr %w1,%r15 (backchain) */ EMIT4(0xb9040000, REG_W1, REG_15); - /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */ - EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED); - /* aghi %r15,-STK_OFF */ - EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth)); - /* stg %w1,152(%r15) (backchain) */ + /* la %bfp,unused_end(%r15) (BPF frame pointer) */ + EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, + offsetofend(struct prog_frame, unused)); + /* aghi %r15,-frame_off */ + EMIT4_IMM(0xa70b0000, REG_15, -jit->frame_off); + /* stg %w1,backchain(%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, - REG_15, 152); + REG_15, + offsetof(struct prog_frame, backchain)); } } @@ -665,13 +679,13 @@ static void call_r1(struct bpf_jit *jit) /* * Function epilogue */ -static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) +static void bpf_jit_epilogue(struct bpf_jit *jit) { jit->exit_ip = jit->prg; /* Load exit code: lgr %r2,%b0 */ EMIT4(0xb9040000, REG_2, BPF_REG_0); /* Restore registers */ - save_restore_regs(jit, REGS_RESTORE, stack_depth, 0); + save_restore_regs(jit, REGS_RESTORE, 0); if (nospec_uses_trampoline()) { jit->r14_thunk_ip = jit->prg; /* Generate __s390_indirect_jump_r14 thunk */ @@ -862,7 +876,7 @@ static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags) * stack space for the large switch statement. 
*/ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, - int i, bool extra_pass, u32 stack_depth) + int i, bool extra_pass) { struct bpf_insn *insn = &fp->insnsi[i]; s32 branch_oc_off = insn->off; @@ -1775,17 +1789,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, jit->seen |= SEEN_FUNC; /* * Copy the tail call counter to where the callee expects it. - * - * Note 1: The callee can increment the tail call counter, but - * we do not load it back, since the x86 JIT does not do this - * either. - * - * Note 2: We assume that the verifier does not let us call the - * main program, which clears the tail call counter on entry. */ - /* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */ - _EMIT6(0xd203f000 | STK_OFF_TCCNT, - 0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth)); + /* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */ + _EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt), + 0xf000 | (jit->frame_off + + offsetof(struct prog_frame, tail_call_cnt))); /* Sign-extend the kfunc arguments. */ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { @@ -1807,6 +1815,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, call_r1(jit); /* lgr %b0,%r2: load return value into %b0 */ EMIT4(0xb9040000, BPF_REG_0, REG_2); + + /* + * Copy the potentially updated tail call counter back. + */ + + if (insn->src_reg == BPF_PSEUDO_CALL) + /* + * mvc frame_off+tail_call_cnt(4,%r15), + * tail_call_cnt(%r15) + */ + _EMIT6(0xd203f000 | (jit->frame_off + + offsetof(struct prog_frame, + tail_call_cnt)), + 0xf000 | offsetof(struct prog_frame, + tail_call_cnt)); + break; } case BPF_JMP | BPF_TAIL_CALL: { @@ -1836,10 +1860,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, * goto out; */ - if (jit->seen & SEEN_STACK) - off = STK_OFF_TCCNT + STK_OFF + stack_depth; - else - off = STK_OFF_TCCNT; + off = jit->frame_off + + offsetof(struct prog_frame, tail_call_cnt); /* lhi %w0,1 */ EMIT4_IMM(0xa7080000, REG_W0, 1); /* laal %w1,%w0,off(%r15) */ @@ -1869,7 +1891,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, /* * Restore registers before calling function */ - save_restore_regs(jit, REGS_RESTORE, stack_depth, 0); + save_restore_regs(jit, REGS_RESTORE, 0); /* * goto *(prog->bpf_func + tail_call_start); @@ -2161,7 +2183,7 @@ static int bpf_set_addr(struct bpf_jit *jit, int i) * Compile eBPF program into s390x code */ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp, - bool extra_pass, u32 stack_depth) + bool extra_pass) { int i, insn_count, lit32_size, lit64_size; u64 kern_arena; @@ -2170,24 +2192,30 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp, jit->lit64 = jit->lit64_start; jit->prg = 0; jit->excnt = 0; + if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) + jit->frame_off = sizeof(struct prog_frame) - + offsetofend(struct prog_frame, unused) + + round_up(fp->aux->stack_depth, 8); + else + jit->frame_off = 0; kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena); if (kern_arena) jit->kern_arena = _EMIT_CONST_U64(kern_arena); jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena); - bpf_jit_prologue(jit, fp, stack_depth); + bpf_jit_prologue(jit, fp); if (bpf_set_addr(jit, 0) < 0) return -1; for (i = 0; i < fp->len; i += insn_count) { - insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth); + insn_count = bpf_jit_insn(jit, fp, i, extra_pass); if (insn_count < 0) return -1; /* Next instruction address */ if (bpf_set_addr(jit, i +
insn_count) < 0) return -1; } - bpf_jit_epilogue(jit, stack_depth); + bpf_jit_epilogue(jit); lit32_size = jit->lit32 - jit->lit32_start; lit64_size = jit->lit64 - jit->lit64_start; @@ -2263,7 +2291,6 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit, */ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) { - u32 stack_depth = round_up(fp->aux->stack_depth, 8); struct bpf_prog *tmp, *orig_fp = fp; struct bpf_binary_header *header; struct s390_jit_data *jit_data; @@ -2316,7 +2343,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) * - 3: Calculate program size and addrs array */ for (pass = 1; pass <= 3; pass++) { - if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) { + if (bpf_jit_prog(&jit, fp, extra_pass)) { fp = orig_fp; goto free_addrs; } @@ -2330,7 +2357,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) goto free_addrs; } skip_init_ctx: - if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) { + if (bpf_jit_prog(&jit, fp, extra_pass)) { bpf_jit_binary_free(header); fp = orig_fp; goto free_addrs; @@ -2651,9 +2678,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, /* stg %r1,backchain_off(%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15, tjit->backchain_off); - /* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */ + /* mvc tccnt_off(4,%r15),stack_size+tail_call_cnt(%r15) */ _EMIT6(0xd203f000 | tjit->tccnt_off, 0xf000 | (tjit->stack_size + + offsetof(struct prog_frame, tail_call_cnt))); /* stmg %r2,%rN,fwd_reg_args_off(%r15) */ if (nr_reg_args) EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2, @@ -2790,8 +2818,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, (nr_stack_args * sizeof(u64) - 1) << 16 | tjit->stack_args_off, 0xf000 | tjit->orig_stack_args_off); - /* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */ - _EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off); + /* mvc tail_call_cnt(4,%r15),tccnt_off(%r15) */ + _EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt), + 0xf000 | tjit->tccnt_off); /* lgr %r1,%r8 */ EMIT4(0xb9040000, REG_1, REG_8); /* %r1() */ @@ -2799,6 +2828,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, /* stg %r2,retval_off(%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15, tjit->retval_off); + /* mvc tccnt_off(4,%r15),tail_call_cnt(%r15) */ + _EMIT6(0xd203f000 | tjit->tccnt_off, + 0xf000 | offsetof(struct prog_frame, tail_call_cnt)); im->ip_after_call = jit->prg_buf + jit->prg; @@ -2848,8 +2880,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET)) EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15, tjit->retval_off); - /* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */ - _EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT), + /* mvc stack_size+tail_call_cnt(4,%r15),tccnt_off(%r15) */ + _EMIT6(0xd203f000 | (tjit->stack_size + + offsetof(struct prog_frame, tail_call_cnt)), 0xf000 | tjit->tccnt_off); /* aghi %r15,stack_size */ EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size); diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c index a0322e8328456..429b6a7631468 100644 --- a/arch/sh/kernel/asm-offsets.c +++ b/arch/sh/kernel/asm-offsets.c @@ -8,6 +8,7 @@ * compile this file to assembler, and then extract the * #defines from the assembly-language output.
*/ +#define COMPILE_OFFSETS #include #include diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c index 3d9b9855dce91..6e660bde48dd8 100644 --- a/arch/sparc/kernel/asm-offsets.c +++ b/arch/sparc/kernel/asm-offsets.c @@ -10,6 +10,7 @@ * * On sparc, thread_info data is static and TI_XXX offsets are computed by hand. */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 06012e68bdcae..284a4cafa4324 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c @@ -387,6 +387,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, if (of_device_register(op)) { printk("%pOF: Could not register of device.\n", dp); + put_device(&op->dev); kfree(op); op = NULL; } diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index f98c2901f3357..f53092b07b9e7 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -677,6 +677,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, if (of_device_register(op)) { printk("%pOF: Could not register of device.\n", dp); + put_device(&op->dev); kfree(op); op = NULL; } diff --git a/arch/sparc/lib/M7memcpy.S b/arch/sparc/lib/M7memcpy.S index cbd42ea7c3f7c..99357bfa8e82a 100644 --- a/arch/sparc/lib/M7memcpy.S +++ b/arch/sparc/lib/M7memcpy.S @@ -696,16 +696,16 @@ FUNC_NAME: EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40) faligndata %f24, %f26, %f10 EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40) - EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40) + EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_32) faligndata %f26, %f28, %f12 - EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_32) add %o4, 64, %o4 - EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40) + EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_24) faligndata %f28, %f30, %f14 - EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40) - EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_24) + EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_16) add %o0, 64, %o0 - EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_8) fsrc2 %f30, %f14 bgu,pt %xcc, .Lunalign_sloop prefetch [%o4 + (8 * BLOCK_SIZE)], 20 @@ -728,7 +728,7 @@ FUNC_NAME: add %o4, 8, %o4 faligndata %f0, %f2, %f16 subcc %o5, 8, %o5 - EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5) + EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_8) fsrc2 %f2, %f0 bgu,pt %xcc, .Lunalign_by8 add %o0, 8, %o0 @@ -772,7 +772,7 @@ FUNC_NAME: subcc %o5, 0x20, %o5 EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) - EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_16) EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) bne,pt %xcc, 1b add %o0, 0x20, %o0 @@ -804,12 +804,12 @@ FUNC_NAME: brz,pt %o3, 2f sub %o2, %o3, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_o3) add %o1, 1, %o1 subcc %o3, 1, %o3 add %o0, 1, %o0 bne,pt 
%xcc, 1b - EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_o3_plus_1) 2: and %o1, 0x7, %o3 brz,pn %o3, .Lmedium_noprefetch_cp diff --git a/arch/sparc/lib/Memcpy_utils.S b/arch/sparc/lib/Memcpy_utils.S index 64fbac28b3db1..207343367bb2d 100644 --- a/arch/sparc/lib/Memcpy_utils.S +++ b/arch/sparc/lib/Memcpy_utils.S @@ -137,6 +137,15 @@ ENTRY(memcpy_retl_o2_plus_63_8) ba,pt %xcc, __restore_asi add %o2, 8, %o0 ENDPROC(memcpy_retl_o2_plus_63_8) +ENTRY(memcpy_retl_o2_plus_o3) + ba,pt %xcc, __restore_asi + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3) +ENTRY(memcpy_retl_o2_plus_o3_plus_1) + add %o3, 1, %o3 + ba,pt %xcc, __restore_asi + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_plus_1) ENTRY(memcpy_retl_o2_plus_o5) ba,pt %xcc, __restore_asi add %o2, %o5, %o0 diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 7ad58ebe0d009..df0ec1bd19489 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -281,7 +281,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ subcc %o5, 0x20, %o5 EX_ST(STORE(stx, %g1, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) - EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_16) EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) bne,pt %icc, 1b add %o0, 0x20, %o0 diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S index ee51c12306894..bbd3ea0a64822 100644 --- a/arch/sparc/lib/NGmemcpy.S +++ b/arch/sparc/lib/NGmemcpy.S @@ -79,8 +79,8 @@ #ifndef EX_RETVAL #define EX_RETVAL(x) x __restore_asi: - ret wr %g0, ASI_AIUS, %asi + ret restore ENTRY(NG_ret_i2_plus_i4_plus_1) ba,pt %xcc, __restore_asi @@ -125,15 +125,16 @@ ENTRY(NG_ret_i2_plus_g1_minus_56) ba,pt %xcc, __restore_asi add %i2, %g1, %i0 ENDPROC(NG_ret_i2_plus_g1_minus_56) -ENTRY(NG_ret_i2_plus_i4) +ENTRY(NG_ret_i2_plus_i4_plus_16) + add %i4, 16, %i4 ba,pt %xcc, __restore_asi add %i2, %i4, %i0 -ENDPROC(NG_ret_i2_plus_i4) -ENTRY(NG_ret_i2_plus_i4_minus_8) - sub %i4, 8, %i4 +ENDPROC(NG_ret_i2_plus_i4_plus_16) +ENTRY(NG_ret_i2_plus_i4_plus_8) + add %i4, 8, %i4 ba,pt %xcc, __restore_asi add %i2, %i4, %i0 -ENDPROC(NG_ret_i2_plus_i4_minus_8) +ENDPROC(NG_ret_i2_plus_i4_plus_8) ENTRY(NG_ret_i2_plus_8) ba,pt %xcc, __restore_asi add %i2, 8, %i0 @@ -160,6 +161,12 @@ ENTRY(NG_ret_i2_and_7_plus_i4) ba,pt %xcc, __restore_asi add %i2, %i4, %i0 ENDPROC(NG_ret_i2_and_7_plus_i4) +ENTRY(NG_ret_i2_and_7_plus_i4_plus_8) + and %i2, 7, %i2 + add %i4, 8, %i4 + ba,pt %xcc, __restore_asi + add %i2, %i4, %i0 +ENDPROC(NG_ret_i2_and_7_plus_i4_plus_8) #endif .align 64 @@ -405,13 +412,13 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ andn %i2, 0xf, %i4 and %i2, 0xf, %i2 1: subcc %i4, 0x10, %i4 - EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4) + EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4_plus_16) add %i1, 0x08, %i1 - EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4) + EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4_plus_16) sub %i1, 0x08, %i1 - EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4) + EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4_plus_16) add %i1, 0x8, %i1 - EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8) + EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_plus_8) bgu,pt %XCC, 1b add %i1, 0x8, %i1 73: andcc %i2, 0x8, %g0 @@ -468,7 +475,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ subcc %i4, 0x8, %i4 srlx %g3, %i3, %i5 or
%i5, %g2, %i5 - EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4) + EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4_plus_8) add %o0, 0x8, %o0 bgu,pt %icc, 1b sllx %g3, %g1, %g2 diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S index 635398ec7540e..154fbd35400ca 100644 --- a/arch/sparc/lib/U1memcpy.S +++ b/arch/sparc/lib/U1memcpy.S @@ -164,17 +164,18 @@ ENTRY(U1_gs_40_fp) retl add %o0, %o2, %o0 ENDPROC(U1_gs_40_fp) -ENTRY(U1_g3_0_fp) - VISExitHalf - retl - add %g3, %o2, %o0 -ENDPROC(U1_g3_0_fp) ENTRY(U1_g3_8_fp) VISExitHalf add %g3, 8, %g3 retl add %g3, %o2, %o0 ENDPROC(U1_g3_8_fp) +ENTRY(U1_g3_16_fp) + VISExitHalf + add %g3, 16, %g3 + retl + add %g3, %o2, %o0 +ENDPROC(U1_g3_16_fp) ENTRY(U1_o2_0_fp) VISExitHalf retl @@ -547,18 +548,18 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 62: FINISH_VISCHUNK(o0, f44, f46) 63: UNEVEN_VISCHUNK_LAST(o0, f46, f0) -93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp) +93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_8_fp) add %o1, 8, %o1 subcc %g3, 8, %g3 faligndata %f0, %f2, %f8 - EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp) + EX_ST_FP(STORE(std, %f8, %o0), U1_g3_16_fp) bl,pn %xcc, 95f add %o0, 8, %o0 - EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp) + EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_8_fp) add %o1, 8, %o1 subcc %g3, 8, %g3 faligndata %f2, %f0, %f8 - EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp) + EX_ST_FP(STORE(std, %f8, %o0), U1_g3_16_fp) bge,pt %xcc, 93b add %o0, 8, %o0 diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S index 9248d59c734ce..bace3a18f836f 100644 --- a/arch/sparc/lib/U3memcpy.S +++ b/arch/sparc/lib/U3memcpy.S @@ -267,6 +267,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ faligndata %f10, %f12, %f26 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2) + and %o2, 0x3f, %o2 subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE add %o1, 0x40, %o1 bgu,pt %XCC, 1f @@ -336,7 +337,6 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ * Also notice how this code is careful not to perform a * load past the end of the src buffer. 
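 * %o2 already holds only the sub-64-byte remainder here, as it is * now masked with 0x3f before the main copy loop above.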
*/ - and %o2, 0x3f, %o2 andcc %o2, 0x38, %g2 be,pn %XCC, 2f subcc %g2, 0x8, %g2 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index c276d70a74799..305f191ba81b1 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -130,6 +130,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift) { + unsigned long hugepage_size = _PAGE_SZ4MB_4U; + + pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4U; + + switch (shift) { + case HPAGE_256MB_SHIFT: + hugepage_size = _PAGE_SZ256MB_4U; + pte_val(entry) |= _PAGE_PMD_HUGE; + break; + case HPAGE_SHIFT: + pte_val(entry) |= _PAGE_PMD_HUGE; + break; + case HPAGE_64K_SHIFT: + hugepage_size = _PAGE_SZ64K_4U; + break; + default: + WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift); + } + + pte_val(entry) = pte_val(entry) | hugepage_size; return entry; } diff --git a/arch/um/kernel/asm-offsets.c b/arch/um/kernel/asm-offsets.c index 1fb12235ab9c8..a69873aa697f4 100644 --- a/arch/um/kernel/asm-offsets.c +++ b/arch/um/kernel/asm-offsets.c @@ -1 +1,3 @@ +#define COMPILE_OFFSETS + #include diff --git a/arch/x86/entry/entry_64_fred.S b/arch/x86/entry/entry_64_fred.S index a02bc6f3d2e6a..0ca9db540b16d 100644 --- a/arch/x86/entry/entry_64_fred.S +++ b/arch/x86/entry/entry_64_fred.S @@ -16,7 +16,7 @@ .macro FRED_ENTER UNWIND_HINT_END_OF_STACK - ENDBR + ANNOTATE_NOENDBR PUSH_AND_CLEAR_REGS movq %rsp, %rdi /* %rdi -> pt_regs */ .endm diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 36d8404f406de..acc0774519ce2 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2812,8 +2812,8 @@ static void intel_pmu_enable_fixed(struct perf_event *event) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - u64 mask, bits = 0; int idx = hwc->idx; + u64 bits = 0; if (is_topdown_idx(idx)) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); @@ -2849,14 +2849,10 @@ static void intel_pmu_enable_fixed(struct perf_event *event) idx -= INTEL_PMC_IDX_FIXED; bits = intel_fixed_bits_by_idx(idx, bits); - mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK); - - if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { + if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); - mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); - } - cpuc->fixed_ctrl_val &= ~mask; + cpuc->fixed_ctrl_val &= ~intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK); cpuc->fixed_ctrl_val |= bits; } diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index af87f440bc2ac..8c345ad458415 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -681,7 +681,7 @@ void __init hv_vtom_init(void) x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility; /* Set WB as the default cache mode. 
*/ - mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK); + guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK); } #endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 21d07aa9400c7..b67280d761f63 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -722,6 +722,7 @@ #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300 #define MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301 #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302 +#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET 0xc0000303 /* AMD Last Branch Record MSRs */ #define MSR_AMD64_LBR_SELECT 0xc000010e diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h index 4218248083d98..c69e269937c56 100644 --- a/arch/x86/include/asm/mtrr.h +++ b/arch/x86/include/asm/mtrr.h @@ -58,8 +58,8 @@ struct mtrr_state_type { */ # ifdef CONFIG_MTRR void mtrr_bp_init(void); -void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var, - mtrr_type def_type); +void guest_force_mtrr_state(struct mtrr_var_range *var, unsigned int num_var, + mtrr_type def_type); extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform); extern void mtrr_save_fixed_ranges(void *); extern void mtrr_save_state(void); @@ -75,9 +75,9 @@ void mtrr_disable(void); void mtrr_enable(void); void mtrr_generic_set_state(void); # else -static inline void mtrr_overwrite_state(struct mtrr_var_range *var, - unsigned int num_var, - mtrr_type def_type) +static inline void guest_force_mtrr_state(struct mtrr_var_range *var, + unsigned int num_var, + mtrr_type def_type) { } diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index aa351c4a20eee..c69b6498f6eaa 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -35,7 +35,6 @@ #define ARCH_PERFMON_EVENTSEL_EQ (1ULL << 36) #define ARCH_PERFMON_EVENTSEL_UMASK2 (0xFFULL << 40) -#define INTEL_FIXED_BITS_MASK 0xFULL #define INTEL_FIXED_BITS_STRIDE 4 #define INTEL_FIXED_0_KERNEL (1ULL << 0) #define INTEL_FIXED_0_USER (1ULL << 1) @@ -47,6 +46,11 @@ #define ICL_EVENTSEL_ADAPTIVE (1ULL << 34) #define ICL_FIXED_0_ADAPTIVE (1ULL << 32) +#define INTEL_FIXED_BITS_MASK \ + (INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER | \ + INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI | \ + ICL_FIXED_0_ADAPTIVE) + #define intel_fixed_bits_by_idx(_idx, _bits) \ ((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE)) diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 9d6411c659205..00cefbb59fa98 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -244,7 +244,7 @@ static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node) static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node) { - unsigned int p; + unsigned long p; /* * Load CPU and node number from the GDT. LSL is faster than RDTSCP @@ -254,10 +254,10 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node) * * If RDPID is available, use it. 
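 * RDPID reads IA32_TSC_AUX, which the kernel programs with the same * CPU/node encoding as the segment limit.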
*/ - alternative_io ("lsl %[seg],%[p]", - ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ + alternative_io ("lsl %[seg],%k[p]", + "rdpid %[p]", X86_FEATURE_RDPID, - [p] "=a" (p), [seg] "r" (__CPUNODE_SEG)); + [p] "=r" (p), [seg] "r" (__CPUNODE_SEG)); if (cpu) *cpu = (p & VDSO_CPUNODE_MASK); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index f3cb559a598df..939401b5d2ef0 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1186,8 +1186,10 @@ static void __init retbleed_select_mitigation(void) retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; break; default: - if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) + if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) { pr_err(RETBLEED_INTEL_MSG); + retbleed_mitigation = RETBLEED_MITIGATION_NONE; + } } } @@ -1596,7 +1598,7 @@ spectre_v2_user_select_mitigation(void) static const char * const spectre_v2_strings[] = { [SPECTRE_V2_NONE] = "Vulnerable", [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", - [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", + [SPECTRE_V2_LFENCE] = "Vulnerable: LFENCE", [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", @@ -3249,9 +3251,6 @@ static const char *spectre_bhi_state(void) static ssize_t spectre_v2_show_state(char *buf) { - if (spectre_v2_enabled == SPECTRE_V2_LFENCE) - return sysfs_emit(buf, "Vulnerable: LFENCE\n"); - if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 910accfeb7856..5d2949b1e4b3c 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -182,7 +182,7 @@ static bool need_sha_check(u32 cur_rev) } switch (cur_rev >> 8) { - case 0x80012: return cur_rev <= 0x800126f; break; + case 0x80012: return cur_rev <= 0x8001277; break; case 0x80082: return cur_rev <= 0x800820f; break; case 0x83010: return cur_rev <= 0x830107c; break; case 0x86001: return cur_rev <= 0x860010e; break; diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 1ececfce7a46a..4b1e2d14e5aac 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -423,7 +423,7 @@ void __init mtrr_copy_map(void) } /** - * mtrr_overwrite_state - set static MTRR state + * guest_force_mtrr_state - set static MTRR state for a guest * * Used to set MTRR state via different means (e.g. with data obtained from * a hypervisor). @@ -436,8 +436,8 @@ void __init mtrr_copy_map(void) * @num_var: length of the @var array * @def_type: default caching type */ -void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var, - mtrr_type def_type) +void guest_force_mtrr_state(struct mtrr_var_range *var, unsigned int num_var, + mtrr_type def_type) { unsigned int i; diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c index 989d368be04fc..ecbda0341a8a3 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.c +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c @@ -625,7 +625,7 @@ void mtrr_save_state(void) static int __init mtrr_init_finalize(void) { /* - * Map might exist if mtrr_overwrite_state() has been called or if + * Map might exist if guest_force_mtrr_state() has been called or if * mtrr_enabled() returns true. 
*/ mtrr_copy_map(); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 851b561850e0c..c117fba4f8da9 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -312,15 +312,35 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width) return chunks >> shift; } +static u64 get_corrected_val(struct rdt_resource *r, struct rdt_mon_domain *d, + u32 rmid, enum resctrl_event_id eventid, u64 msr_val) +{ + struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d); + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); + struct arch_mbm_state *am; + u64 chunks; + + am = get_arch_mbm_state(hw_dom, rmid, eventid); + if (am) { + am->chunks += mbm_overflow_count(am->prev_msr, msr_val, + hw_res->mbm_width); + chunks = get_corrected_mbm_count(rmid, am->chunks); + am->prev_msr = msr_val; + } else { + chunks = msr_val; + } + + return chunks * hw_res->mon_scale; +} + int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d, u32 unused, u32 rmid, enum resctrl_event_id eventid, u64 *val, void *ignored) { struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d); - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); int cpu = cpumask_any(&d->hdr.cpu_mask); struct arch_mbm_state *am; - u64 msr_val, chunks; + u64 msr_val; u32 prmid; int ret; @@ -328,22 +348,16 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d, prmid = logical_rmid_to_physical_rmid(cpu, rmid); ret = __rmid_read_phys(prmid, eventid, &msr_val); - if (ret) - return ret; - am = get_arch_mbm_state(hw_dom, rmid, eventid); - if (am) { - am->chunks += mbm_overflow_count(am->prev_msr, msr_val, - hw_res->mbm_width); - chunks = get_corrected_mbm_count(rmid, am->chunks); - am->prev_msr = msr_val; - } else { - chunks = msr_val; + if (!ret) { + *val = get_corrected_val(r, d, rmid, eventid, msr_val); + } else if (ret == -EINVAL) { + am = get_arch_mbm_state(hw_dom, rmid, eventid); + if (am) + am->prev_msr = 0; } - *val = chunks * hw_res->mon_scale; - - return 0; + return ret; } static void limbo_release_entry(struct rmid_entry *entry) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 21e9e48453541..bd21c568bc2ad 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -933,6 +933,19 @@ static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc) static void __init kvm_init_platform(void) { + u64 tolud = PFN_PHYS(e820__end_of_low_ram_pfn()); + /* + * Note, hardware requires variable MTRR ranges to be power-of-2 sized + * and naturally aligned. But when forcing guest MTRR state, Linux + * doesn't program the forced ranges into hardware. Don't bother doing + * the math to generate a technically-legal range. + */ + struct mtrr_var_range pci_hole = { + .base_lo = tolud | X86_MEMTYPE_UC, + .mask_lo = (u32)(~(SZ_4G - tolud - 1)) | MTRR_PHYSMASK_V, + .mask_hi = (BIT_ULL(boot_cpu_data.x86_phys_bits) - 1) >> 32, + }; + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) && kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) { unsigned long nr_pages; @@ -982,8 +995,12 @@ static void __init kvm_init_platform(void) kvmclock_init(); x86_platform.apic_post_init = kvm_apic_init; - /* Set WB as the default cache mode for SEV-SNP and TDX */ - mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK); + /* + * Set WB as the default cache mode for SEV-SNP and TDX, with a single + * UC range for the legacy PCI hole, e.g. 
so that devices that expect + * to get UC/WC mappings don't get surprised with WB. + */ + guest_force_mtrr_state(&pci_hole, 1, MTRR_TYPE_WRBACK); } #if defined(CONFIG_AMD_MEM_ENCRYPT) diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index 5a4b21389b1d9..d432f3824f0c2 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -156,15 +156,26 @@ static int identify_insn(struct insn *insn) if (!insn->modrm.nbytes) return -EINVAL; - /* All the instructions of interest start with 0x0f. */ - if (insn->opcode.bytes[0] != 0xf) + /* The instructions of interest have 2-byte opcodes: 0F 00 or 0F 01. */ + if (insn->opcode.nbytes < 2 || insn->opcode.bytes[0] != 0xf) return -EINVAL; if (insn->opcode.bytes[1] == 0x1) { switch (X86_MODRM_REG(insn->modrm.value)) { case 0: + /* The reg form of 0F 01 /0 encodes VMX instructions. */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) + return -EINVAL; + return UMIP_INST_SGDT; case 1: + /* + * The reg form of 0F 01 /1 encodes MONITOR/MWAIT, + * STAC/CLAC, and ENCLS. + */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) + return -EINVAL; + return UMIP_INST_SIDT; case 4: return UMIP_INST_SMSW; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 8f587c5bb6bc4..a7a1486ce2704 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -816,7 +816,7 @@ void kvm_set_cpu_caps(void) F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ | F(VERW_CLEAR) | F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ | - F(WRMSR_XX_BASE_NS) + F(WRMSR_XX_BASE_NS) | F(SRSO_USER_KERNEL_NO) ); kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 60986f67c35a8..4b43944141034 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -5104,12 +5104,11 @@ void init_decode_cache(struct x86_emulate_ctxt *ctxt) ctxt->mem_read.end = 0; } -int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) +int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts) { const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; - bool is_guest_mode = ctxt->ops->is_guest_mode(ctxt); ctxt->mem_read.pos = 0; @@ -5157,7 +5156,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) fetch_possible_mmx_operand(&ctxt->dst); } - if (unlikely(is_guest_mode) && ctxt->intercept) { + if (unlikely(check_intercepts) && ctxt->intercept) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5186,7 +5185,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) goto done; } - if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) { + if (unlikely(check_intercepts) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5240,7 +5239,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) special_insn: - if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) { + if (unlikely(check_intercepts) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 10495fffb8905..1bede46b67c3f 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -230,7 +230,6 @@ struct x86_emulate_ops { void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); bool (*is_smm)(struct x86_emulate_ctxt *ctxt); - bool (*is_guest_mode)(struct x86_emulate_ctxt *ctxt); int 
(*leave_smm)(struct x86_emulate_ctxt *ctxt); void (*triple_fault)(struct x86_emulate_ctxt *ctxt); int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr); @@ -514,7 +513,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt); #define EMULATION_RESTART 1 #define EMULATION_INTERCEPTED 2 void init_decode_cache(struct x86_emulate_ctxt *ctxt); -int x86_emulate_insn(struct x86_emulate_ctxt *ctxt); +int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts); int emulator_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code); diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 47a46283c8667..a26e8f5ad7901 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -650,6 +650,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = pmu->global_ctrl; break; case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR: + case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET: case MSR_CORE_PERF_GLOBAL_OVF_CTRL: msr_info->data = 0; break; @@ -711,6 +712,10 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!msr_info->host_initiated) pmu->global_status &= ~data; break; + case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET: + if (!msr_info->host_initiated) + pmu->global_status |= data & ~pmu->global_status_rsvd; + break; default: kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index); return kvm_pmu_call(set_msr)(vcpu, msr_info); diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index ad89d0bd60058..103604c4b33b5 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -13,7 +13,7 @@ #define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | \ MSR_IA32_MISC_ENABLE_BTS_UNAVAIL) -/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */ +/* retrieve a fixed counter's bits out of IA32_FIXED_CTR_CTRL */ #define fixed_ctrl_field(ctrl_reg, idx) \ (((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK) diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 22d5a65b410c9..dbca6453ec4d7 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -113,6 +113,7 @@ static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS: case MSR_AMD64_PERF_CNTR_GLOBAL_CTL: case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR: + case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET: return pmu->version > 1; default: if (msr > MSR_F15H_PERF_CTR5 && diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 9170a9e127b7a..c48a466db0c37 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4237,13 +4237,21 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu) static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); + struct vmcb_control_area *control = &svm->vmcb->control; + + /* + * Next RIP must be provided as IRQs are disabled, and accessing guest + * memory to decode the instruction might fault, i.e. might sleep.
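+ * With NRIPS the next RIP comes straight from the VMCB, so no decode + * is needed here.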
+ */ + if (!nrips || !control->next_rip) + return EXIT_FASTPATH_NONE; if (is_guest_mode(vcpu)) return EXIT_FASTPATH_NONE; - switch (svm->vmcb->control.exit_code) { + switch (control->exit_code) { case SVM_EXIT_MSR: - if (!svm->vmcb->control.exit_info_1) + if (!control->exit_info_1) break; return handle_fastpath_set_msr_irqoff(vcpu); case SVM_EXIT_HLT: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 13ab13d2e9d67..20f89bceaeae9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -364,6 +364,7 @@ static const u32 msrs_to_save_pmu[] = { MSR_AMD64_PERF_CNTR_GLOBAL_CTL, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, + MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET, }; static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) + @@ -7449,6 +7450,7 @@ static void kvm_probe_msr_to_save(u32 msr_index) case MSR_AMD64_PERF_CNTR_GLOBAL_CTL: case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS: case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR: + case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET: if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) return; break; @@ -8567,11 +8569,6 @@ static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt) return is_smm(emul_to_vcpu(ctxt)); } -static bool emulator_is_guest_mode(struct x86_emulate_ctxt *ctxt) -{ - return is_guest_mode(emul_to_vcpu(ctxt)); -} - #ifndef CONFIG_KVM_SMM static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt) { @@ -8655,7 +8652,6 @@ static const struct x86_emulate_ops emulate_ops = { .guest_cpuid_is_intel_compatible = emulator_guest_cpuid_is_intel_compatible, .set_nmi_mask = emulator_set_nmi_mask, .is_smm = emulator_is_smm, - .is_guest_mode = emulator_is_guest_mode, .leave_smm = emulator_leave_smm, .triple_fault = emulator_triple_fault, .set_xcr = emulator_set_xcr, @@ -9209,7 +9205,14 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, ctxt->exception.address = 0; } - r = x86_emulate_insn(ctxt); + /* + * Check L1's instruction intercepts when emulating instructions for + * L2, unless KVM is re-emulating a previously decoded instruction, + * e.g. to complete userspace I/O, in which case KVM has already + * checked the intercepts. + */ + r = x86_emulate_insn(ctxt, is_guest_mode(vcpu) && + !(emulation_type & EMULTYPE_NO_DECODE)); if (r == EMULATION_INTERCEPTED) return 1; diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index a8eb7e0c473cf..e033d55942659 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -171,7 +171,7 @@ static void __init xen_set_mtrr_data(void) /* Only overwrite MTRR state if any MTRR could be got from Xen. 
*/ if (reg) - mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE); + guest_force_mtrr_state(var, reg, MTRR_TYPE_UNCACHABLE); #endif } @@ -195,7 +195,7 @@ static void __init xen_pv_init_platform(void) if (xen_initial_domain()) xen_set_mtrr_data(); else - mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK); + guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK); /* Adjust nr_cpu_ids before "enumeration" happens */ xen_smp_count_cpus(); diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index da38de20ae598..cfbced95e944a 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -11,6 +11,7 @@ * * Chris Zankel */ +#define COMPILE_OFFSETS #include #include diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index d6d2b533a5744..f5add569ca830 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -230,10 +230,14 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf, static ssize_t proc_write_simdisk(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - char *tmp = memdup_user_nul(buf, count); + char *tmp; struct simdisk *dev = pde_data(file_inode(file)); int err; + if (count == 0 || count > PAGE_SIZE) + return -EINVAL; + + tmp = memdup_user_nul(buf, count); if (IS_ERR(tmp)) return PTR_ERR(tmp); diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index b1e7415f8439c..d04192ae3dce9 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "blk-cgroup.h" #include "blk-crypto-internal.h" @@ -230,7 +231,9 @@ static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr) bio->bi_status = BLK_STS_RESOURCE; return false; } + bio_chain(split_bio, bio); + trace_block_split(split_bio, bio->bi_iter.bi_sector); submit_bio_noacct(bio); *bio_ptr = split_bio; } diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 156e9bb07abf1..2fb234ab467b1 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -150,9 +150,11 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) return; hctx_for_each_ctx(hctx, ctx, i) - kobject_del(&ctx->kobj); + if (ctx->kobj.state_in_sysfs) + kobject_del(&ctx->kobj); - kobject_del(&hctx->kobj); + if (hctx->kobj.state_in_sysfs) + kobject_del(&hctx->kobj); } static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) diff --git a/block/blk-settings.c b/block/blk-settings.c index f24fffdb6c294..d72a283401c3a 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -552,7 +552,8 @@ static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lb int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t start) { - unsigned int top, bottom, alignment, ret = 0; + unsigned int top, bottom, alignment; + int ret = 0; t->features |= (b->features & BLK_FEAT_INHERIT_MASK); diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index ee2fdab42334f..7e0ce7bf68c99 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -611,11 +611,14 @@ int x509_process_extension(void *context, size_t hdrlen, /* * Get hold of the basicConstraints * v[1] is the encoding size - * (Expect 0x2 or greater, making it 1 or more bytes) + * (Expect 0x00 for empty SEQUENCE with CA:FALSE, or + * 0x03 or greater for non-empty SEQUENCE) * v[2] is the encoding type * (Expect an ASN1_BOOL for the CA) 
- * v[3] is the contents of the ASN1_BOOL - * (Expect 1 if the CA is TRUE) + * v[3] is the length of the ASN1_BOOL + * (Expect 1 for a single byte boolean) + * v[4] is the contents of the ASN1_BOOL + * (Expect 0xFF if the CA is TRUE) * vlen should match the entire extension size */ if (v[0] != (ASN1_CONS_BIT | ASN1_SEQ)) @@ -624,8 +627,13 @@ int x509_process_extension(void *context, size_t hdrlen, return -EBADMSG; if (v[1] != vlen - 2) return -EBADMSG; - if (vlen >= 4 && v[1] != 0 && v[2] == ASN1_BOOL && v[3] == 1) + /* Empty SEQUENCE means CA:FALSE (default value omitted per DER) */ + if (v[1] == 0) + return 0; + if (vlen >= 5 && v[2] == ASN1_BOOL && v[3] == 1 && v[4] == 0xFF) ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_CA; + else + return -EBADMSG; return 0; } diff --git a/crypto/essiv.c b/crypto/essiv.c index e63fc6442e320..aa7ebc1235274 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -186,9 +186,14 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc) const struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm); struct essiv_aead_request_ctx *rctx = aead_request_ctx(req); struct aead_request *subreq = &rctx->aead_req; + int ivsize = crypto_aead_ivsize(tfm); + int ssize = req->assoclen - ivsize; struct scatterlist *src = req->src; int err; + if (ssize < 0) + return -EINVAL; + crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv); /* @@ -198,19 +203,12 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc) */ rctx->assoc = NULL; if (req->src == req->dst || !enc) { - scatterwalk_map_and_copy(req->iv, req->dst, - req->assoclen - crypto_aead_ivsize(tfm), - crypto_aead_ivsize(tfm), 1); + scatterwalk_map_and_copy(req->iv, req->dst, ssize, ivsize, 1); } else { u8 *iv = (u8 *)aead_request_ctx(req) + tctx->ivoffset; - int ivsize = crypto_aead_ivsize(tfm); - int ssize = req->assoclen - ivsize; struct scatterlist *sg; int nents; - if (ssize < 0) - return -EINVAL; - nents = sg_nents_for_len(req->src, ssize); if (nents < 0) return -EINVAL; diff --git a/crypto/rng.c b/crypto/rng.c index 9d8804e464226..72da96fdfb5dd 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -167,6 +167,11 @@ int crypto_del_default_rng(void) EXPORT_SYMBOL_GPL(crypto_del_default_rng); #endif +static void rng_default_set_ent(struct crypto_rng *tfm, const u8 *data, + unsigned int len) +{ +} + int crypto_register_rng(struct rng_alg *alg) { struct crypto_alg *base = &alg->base; @@ -178,6 +183,9 @@ int crypto_register_rng(struct rng_alg *alg) base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_RNG; + if (!alg->set_ent) + alg->set_ent = rng_default_set_ent; + return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_rng); diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h index 02561b6cecc64..2d7b3af09e284 100644 --- a/drivers/accel/qaic/qaic.h +++ b/drivers/accel/qaic/qaic.h @@ -91,6 +91,8 @@ struct dma_bridge_chan { * response queue's head and tail pointer of this DBC. 
*/ void __iomem *dbc_base; + /* Synchronizes access to Request queue's head and tail pointer */ + struct mutex req_lock; /* Head of list where each node is a memory handle queued in request queue */ struct list_head xfer_list; /* Synchronizes DBC readers during cleanup */ diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c index d8bdab69f8009..b86a8e48e731b 100644 --- a/drivers/accel/qaic/qaic_control.c +++ b/drivers/accel/qaic/qaic_control.c @@ -407,7 +407,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev, return -EINVAL; remaining = in_trans->size - resources->xferred_dma_size; if (remaining == 0) - return 0; + return -EINVAL; if (check_add_overflow(xfer_start_addr, remaining, &end)) return -EINVAL; diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index 43aba57b48f05..265eeb4e156fc 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -1357,13 +1357,17 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr goto release_ch_rcu; } + ret = mutex_lock_interruptible(&dbc->req_lock); + if (ret) + goto release_ch_rcu; + head = readl(dbc->dbc_base + REQHP_OFF); tail = readl(dbc->dbc_base + REQTP_OFF); if (head == U32_MAX || tail == U32_MAX) { /* PCI link error */ ret = -ENODEV; - goto release_ch_rcu; + goto unlock_req_lock; } queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail); @@ -1371,11 +1375,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc, head, &tail); if (ret) - goto release_ch_rcu; + goto unlock_req_lock; /* Finalize commit to hardware */ submit_ts = ktime_get_ns(); writel(tail, dbc->dbc_base + REQTP_OFF); + mutex_unlock(&dbc->req_lock); update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts, submit_ts, queue_level); @@ -1383,6 +1388,9 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr if (datapath_polling) schedule_work(&dbc->poll_work); +unlock_req_lock: + if (ret) + mutex_unlock(&dbc->req_lock); release_ch_rcu: srcu_read_unlock(&dbc->ch_lock, rcu_id); unlock_dev_srcu: diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c index 20b653d99e524..5ed49daaf541f 100644 --- a/drivers/accel/qaic/qaic_debugfs.c +++ b/drivers/accel/qaic/qaic_debugfs.c @@ -251,6 +251,9 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d if (ret) goto destroy_workqueue; + dev_set_drvdata(&mhi_dev->dev, qdev); + qdev->bootlog_ch = mhi_dev; + for (i = 0; i < BOOTLOG_POOL_SIZE; i++) { msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL); if (!msg) { @@ -266,8 +269,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d goto mhi_unprepare; } - dev_set_drvdata(&mhi_dev->dev, qdev); - qdev->bootlog_ch = mhi_dev; return 0; mhi_unprepare: diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index 10e711c96a670..cb606c4bb8511 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -422,6 +422,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de return NULL; init_waitqueue_head(&qdev->dbc[i].dbc_release); INIT_LIST_HEAD(&qdev->dbc[i].bo_lists); + ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock); + if (ret) + return NULL; } return qdev; diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c index 
d50261d05f3a1..515b20d0b698a 100644 --- a/drivers/acpi/acpi_dbg.c +++ b/drivers/acpi/acpi_dbg.c @@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file) return 0; } -static int acpi_aml_read_user(char __user *buf, int len) +static ssize_t acpi_aml_read_user(char __user *buf, size_t len) { - int ret; struct circ_buf *crc = &acpi_aml_io.out_crc; - int n; + ssize_t ret; + size_t n; char *p; ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER); @@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len) /* sync head before removing logs */ smp_rmb(); p = &crc->buf[crc->tail]; - n = min(len, circ_count_to_end(crc)); + n = min_t(size_t, len, circ_count_to_end(crc)); if (copy_to_user(buf, p, n)) { ret = -EFAULT; goto out; @@ -599,8 +599,8 @@ static int acpi_aml_read_user(char __user *buf, int len) static ssize_t acpi_aml_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - int ret = 0; - int size = 0; + ssize_t ret = 0; + ssize_t size = 0; if (!count) return 0; @@ -639,11 +639,11 @@ static ssize_t acpi_aml_read(struct file *file, char __user *buf, return size > 0 ? size : ret; } -static int acpi_aml_write_user(const char __user *buf, int len) +static ssize_t acpi_aml_write_user(const char __user *buf, size_t len) { - int ret; struct circ_buf *crc = &acpi_aml_io.in_crc; - int n; + ssize_t ret; + size_t n; char *p; ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER); @@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len) /* sync tail before inserting cmds */ smp_mb(); p = &crc->buf[crc->head]; - n = min(len, circ_space_to_end(crc)); + n = min_t(size_t, len, circ_space_to_end(crc)); if (copy_from_user(p, buf, n)) { ret = -EFAULT; goto out; @@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len) ret = n; out: acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0); - return n; + return ret; } static ssize_t acpi_aml_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - int ret = 0; - int size = 0; + ssize_t ret = 0; + ssize_t size = 0; if (!count) return 0; diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c index b831cb8e53dc3..b75ceaab716e8 100644 --- a/drivers/acpi/acpi_tad.c +++ b/drivers/acpi/acpi_tad.c @@ -565,6 +565,9 @@ static void acpi_tad_remove(struct platform_device *pdev) pm_runtime_get_sync(dev); + if (dd->capabilities & ACPI_TAD_RT) + sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group); + if (dd->capabilities & ACPI_TAD_DC_WAKE) sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group); diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 6f4fe47c955bd..35460c2072a4a 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -1141,7 +1141,7 @@ struct acpi_port_info { #define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91 #define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92 #define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93 -#define ACPI_RESOURCE_NAME_LARGE_MAX 0x94 +#define ACPI_RESOURCE_NAME_LARGE_MAX 0x93 /***************************************************************************** * diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index 989dc01af03fb..bc205b3309043 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c @@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void) return_ACPI_STATUS(AE_OK); } + if (!acpi_gbl_use_global_lock) { + return_ACPI_STATUS(AE_OK); + } + /* Attempt installation of the global lock handler */ 
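/*
 * (Background, for illustration only and not part of the patch: in
 * ACPICA, acpi_gbl_use_global_lock defaults to TRUE; a host that sets
 * it to FALSE opts out of ACPI global lock usage, and the early
 * return added above then skips installing the fixed-event handler
 * below.)
 */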
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index 58b02e4b254b8..544964c9c530a 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c @@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address, { struct acpi_table_header local_header; +#pragma GCC diagnostic push +#if defined(__GNUC__) && __GNUC__ >= 11 +#pragma GCC diagnostic ignored "-Wstringop-overread" +#endif + if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) { /* FACS only has signature and length fields */ @@ -135,4 +140,5 @@ acpi_tb_print_table_header(acpi_physical_address address, local_header.asl_compiler_id, local_header.asl_compiler_revision)); } +#pragma GCC diagnostic pop } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 65fa3444367a1..11c7e35fafa25 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -92,7 +92,7 @@ enum { struct acpi_battery { struct mutex lock; - struct mutex sysfs_lock; + struct mutex update_lock; struct power_supply *bat; struct power_supply_desc bat_desc; struct acpi_device *device; @@ -903,15 +903,12 @@ static int sysfs_add_battery(struct acpi_battery *battery) static void sysfs_remove_battery(struct acpi_battery *battery) { - mutex_lock(&battery->sysfs_lock); - if (!battery->bat) { - mutex_unlock(&battery->sysfs_lock); + if (!battery->bat) return; - } + battery_hook_remove_battery(battery); power_supply_unregister(battery->bat); battery->bat = NULL; - mutex_unlock(&battery->sysfs_lock); } static void find_battery(const struct dmi_header *dm, void *private) @@ -1071,6 +1068,9 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data) if (!battery) return; + + guard(mutex)(&battery->update_lock); + old = battery->bat; /* * On Acer Aspire V5-573G notifications are sometimes triggered too @@ -1093,21 +1093,22 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data) } static int battery_notify(struct notifier_block *nb, - unsigned long mode, void *_unused) + unsigned long mode, void *_unused) { struct acpi_battery *battery = container_of(nb, struct acpi_battery, pm_nb); - int result; - switch (mode) { - case PM_POST_HIBERNATION: - case PM_POST_SUSPEND: + if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION) { + guard(mutex)(&battery->update_lock); + if (!acpi_battery_present(battery)) return 0; if (battery->bat) { acpi_battery_refresh(battery); } else { + int result; + result = acpi_battery_get_info(battery); if (result) return result; @@ -1119,7 +1120,6 @@ static int battery_notify(struct notifier_block *nb, acpi_battery_init_alarm(battery); acpi_battery_get_state(battery); - break; } return 0; @@ -1197,6 +1197,8 @@ static int acpi_battery_update_retry(struct acpi_battery *battery) { int retry, ret; + guard(mutex)(&battery->update_lock); + for (retry = 5; retry; retry--) { ret = acpi_battery_update(battery, false); if (!ret) @@ -1207,6 +1209,13 @@ static int acpi_battery_update_retry(struct acpi_battery *battery) return ret; } +static void sysfs_battery_cleanup(struct acpi_battery *battery) +{ + guard(mutex)(&battery->update_lock); + + sysfs_remove_battery(battery); +} + static int acpi_battery_add(struct acpi_device *device) { int result = 0; @@ -1218,15 +1227,21 @@ static int acpi_battery_add(struct acpi_device *device) if (device->dep_unmet) return -EPROBE_DEFER; - battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL); + battery = devm_kzalloc(&device->dev, sizeof(*battery), 
GFP_KERNEL); if (!battery) return -ENOMEM; battery->device = device; strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME); strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS); device->driver_data = battery; - mutex_init(&battery->lock); - mutex_init(&battery->sysfs_lock); + result = devm_mutex_init(&device->dev, &battery->lock); + if (result) + return result; + + result = devm_mutex_init(&device->dev, &battery->update_lock); + if (result) + return result; + if (acpi_has_method(battery->device->handle, "_BIX")) set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); @@ -1253,10 +1268,7 @@ static int acpi_battery_add(struct acpi_device *device) device_init_wakeup(&device->dev, 0); unregister_pm_notifier(&battery->pm_nb); fail: - sysfs_remove_battery(battery); - mutex_destroy(&battery->lock); - mutex_destroy(&battery->sysfs_lock); - kfree(battery); + sysfs_battery_cleanup(battery); return result; } @@ -1275,11 +1287,10 @@ static void acpi_battery_remove(struct acpi_device *device) device_init_wakeup(&device->dev, 0); unregister_pm_notifier(&battery->pm_nb); - sysfs_remove_battery(battery); - mutex_destroy(&battery->lock); - mutex_destroy(&battery->sysfs_lock); - kfree(battery); + guard(mutex)(&battery->update_lock); + + sysfs_remove_battery(battery); } #ifdef CONFIG_PM_SLEEP @@ -1296,6 +1307,9 @@ static int acpi_battery_resume(struct device *dev) return -EINVAL; battery->update_time = 0; + + guard(mutex)(&battery->update_lock); + acpi_battery_update(battery, true); return 0; } diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index ae035b93da087..3eb56b77cb6d9 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2637,7 +2637,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, if (ndr_desc->target_node == NUMA_NO_NODE) { ndr_desc->target_node = phys_to_target_node(spa->address); dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]", - NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); + NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end); } /* diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 0888e4d618d53..b524cf27213d4 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1410,6 +1410,9 @@ int acpi_processor_power_init(struct acpi_processor *pr) if (retval) { if (acpi_processor_registered == 0) cpuidle_unregister_driver(&acpi_idle_driver); + + per_cpu(acpi_cpuidle_device, pr->id) = NULL; + kfree(dev); return retval; } acpi_processor_registered++; diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index e9186339f6e6b..342e7cef723cc 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -83,6 +83,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc, struct fwnode_handle *parent) { struct acpi_data_node *dn; + acpi_handle scope = NULL; bool result; if (acpi_graph_ignore_port(handle)) @@ -98,29 +99,35 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc, INIT_LIST_HEAD(&dn->data.properties); INIT_LIST_HEAD(&dn->data.subnodes); - result = acpi_extract_properties(handle, desc, &dn->data); - - if (handle) { - acpi_handle scope; - acpi_status status; + /* + * The scope for the completion of relative pathname segments and + * subnode object lookup is the one of the namespace node (device) + * containing the object that has returned the package. That is, it's + * the scope of that object's parent device. 
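 * (Hypothetical ASL sketch, for illustration: given
 *      Device (DEV0) { Name (NOD0, Package () { ... }) }
 *  a data package returned by NOD0 has DEV0 as its parent, so
 *  relative pathname segments found in that package are resolved
 *  in the scope of DEV0.)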
+ */ + if (handle) + acpi_get_parent(handle, &scope); - /* - * The scope for the subnode object lookup is the one of the - * namespace node (device) containing the object that has - * returned the package. That is, it's the scope of that - * object's parent. - */ - status = acpi_get_parent(handle, &scope); - if (ACPI_SUCCESS(status) - && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, - &dn->fwnode)) - result = true; - } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data, - &dn->fwnode)) { + /* + * Extract properties from the _DSD-equivalent package pointed to by + * desc and use scope (if not NULL) for the completion of relative + * pathname segments. + * + * The extracted properties will be held in the new data node dn. + */ + result = acpi_extract_properties(scope, desc, &dn->data); + /* + * Look for subnodes in the _DSD-equivalent package pointed to by desc + * and create child nodes of dn if there are any. + */ + if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode)) result = true; - } if (result) { + /* + * This will be NULL if the desc package is embedded in an outer + * _DSD-equivalent package and its scope cannot be determined. + */ dn->handle = handle; dn->data.pointer = desc; list_add_tail(&dn->sibling, list); @@ -132,35 +139,21 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc, return false; } -static bool acpi_nondev_subnode_data_ok(acpi_handle handle, - const union acpi_object *link, - struct list_head *list, - struct fwnode_handle *parent) -{ - struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; - acpi_status status; - - status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf, - ACPI_TYPE_PACKAGE); - if (ACPI_FAILURE(status)) - return false; - - if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list, - parent)) - return true; - - ACPI_FREE(buf.pointer); - return false; -} - static bool acpi_nondev_subnode_ok(acpi_handle scope, const union acpi_object *link, struct list_head *list, struct fwnode_handle *parent) { + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; acpi_handle handle; acpi_status status; + /* + * If the scope is unknown, the _DSD-equivalent package being parsed + * was embedded in an outer _DSD-equivalent package as a result of + * direct evaluation of an object pointed to by a reference. In that + * case, using a pathname as the target object pointer is invalid. + */ if (!scope) return false; @@ -169,7 +162,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope, if (ACPI_FAILURE(status)) return false; - return acpi_nondev_subnode_data_ok(handle, link, list, parent); + status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf, + ACPI_TYPE_PACKAGE); + if (ACPI_FAILURE(status)) + return false; + + if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list, + parent)) + return true; + + ACPI_FREE(buf.pointer); + return false; } static bool acpi_add_nondev_subnodes(acpi_handle scope, @@ -180,9 +183,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope, bool ret = false; int i; + /* + * Every element in the links package is expected to represent a link + * to a non-device node in a tree containing device-specific data. + */ for (i = 0; i < links->package.count; i++) { union acpi_object *link, *desc; - acpi_handle handle; bool result; link = &links->package.elements[i]; @@ -190,26 +196,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope, if (link->package.count != 2) continue; - /* The first one must be a string. */ + /* The first one (the key) must be a string. 
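 * (Illustrative layout, not part of the original comment: each link
 *  is a two-element package along the lines of
 *      Package () { "subnode0", "NOD0" }
 *  where the string "subnode0" names the child node and the second
 *  element identifies the object providing its data.)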
*/ if (link->package.elements[0].type != ACPI_TYPE_STRING) continue; - /* The second one may be a string, a reference or a package. */ + /* The second one (the target) may be a string or a package. */ switch (link->package.elements[1].type) { case ACPI_TYPE_STRING: + /* + * The string is expected to be a full pathname or a + * pathname segment relative to the given scope. That + * pathname is expected to point to an object returning + * a package that contains _DSD-equivalent information. + */ result = acpi_nondev_subnode_ok(scope, link, list, parent); break; - case ACPI_TYPE_LOCAL_REFERENCE: - handle = link->package.elements[1].reference.handle; - result = acpi_nondev_subnode_data_ok(handle, link, list, - parent); - break; case ACPI_TYPE_PACKAGE: + /* + * This happens when a reference is used in AML to + * point to the target. Since the target is expected + * to be a named object, a reference to it will cause it + * to be evaluated in place and its return package will + * be embedded in the links package at the location of + * the reference. + * + * The target package is expected to contain _DSD- + * equivalent information, but the scope in which it + * is located in the original AML is unknown. Thus + * it cannot contain pathname segments represented as + * strings because there is no way to build full + * pathnames out of them. + */ + acpi_handle_debug(scope, "subnode %s: Unknown scope\n", + link->package.elements[0].string.pointer); desc = &link->package.elements[1]; result = acpi_nondev_subnode_extract(desc, NULL, link, list, parent); break; + case ACPI_TYPE_LOCAL_REFERENCE: + /* + * It is not expected to see any local references in + * the links package because referencing a named object + * should cause it to be evaluated in place. + */ + acpi_handle_info(scope, "subnode %s: Unexpected reference\n", + link->package.elements[0].string.pointer); + fallthrough; default: result = false; break; @@ -369,6 +402,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data) struct acpi_data_node *dn; list_for_each_entry(dn, &data->subnodes, sibling) { + if (!dn->handle) + continue; + acpi_detach_data(dn->handle, acpi_nondev_subnode_tag); acpi_untie_nondev_subnodes(&dn->data); @@ -383,6 +419,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data) acpi_status status; bool ret; + if (!dn->handle) + continue; + status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn); if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) { acpi_handle_err(dn->handle, "Can't tag data node\n"); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index bdf09e8b898d0..186c182fd6565 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -846,17 +846,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, } else { if (!internal) node->local_weak_refs++; - if (!node->has_weak_ref && list_empty(&node->work.entry)) { - if (target_list == NULL) { - pr_err("invalid inc weak node for %d\n", - node->debug_id); - return -EINVAL; - } - /* - * See comment above - */ + if (!node->has_weak_ref && target_list && list_empty(&node->work.entry)) binder_enqueue_work_ilocked(&node->work, target_list); - } } return 0; } diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h index 956f1bd087d1c..c7299ce8b3741 100644 --- a/drivers/android/dbitmap.h +++ b/drivers/android/dbitmap.h @@ -37,6 +37,7 @@ static inline void dbitmap_free(struct dbitmap *dmap) { dmap->nbits = 0; kfree(dmap->map); + dmap->map = NULL; } /* Returns the nbits
that a dbitmap can shrink to, 0 if not possible. */ diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 3ebe77566788b..176429904d39a 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -341,7 +341,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) * frequency (by keeping the initial capacity_freq_ref value). */ cpu_clk = of_clk_get(cpu_node, 0); - if (!PTR_ERR_OR_ZERO(cpu_clk)) { + if (!IS_ERR_OR_NULL(cpu_clk)) { per_cpu(capacity_freq_ref, cpu) = clk_get_rate(cpu_clk) / HZ_PER_KHZ; clk_put(cpu_clk); diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c index c795edad1b969..e9a8bd9b20ea0 100644 --- a/drivers/base/devcoredump.c +++ b/drivers/base/devcoredump.c @@ -23,50 +23,46 @@ struct devcd_entry { void *data; size_t datalen; /* - * Here, mutex is required to serialize the calls to del_wk work between - * user/kernel space which happens when devcd is added with device_add() - * and that sends uevent to user space. User space reads the uevents, - * and calls to devcd_data_write() which try to modify the work which is - * not even initialized/queued from devcoredump. + * There are 2 races for which mutex is required. * + * The first race is between device creation and userspace writing to + * schedule immediate destruction. * + * This race is handled by arming the timer before device creation, but + * when device creation fails the timer still exists. * - * cpu0(X) cpu1(Y) + * To solve this, hold the mutex during device_add(), and set + * init_completed on success before releasing the mutex. * - * dev_coredump() uevent sent to user space - * device_add() ======================> user space process Y reads the - * uevents writes to devcd fd - * which results into writes to + * That way the timer will never fire until device_add() is called, and + * it will do nothing if init_completed is not set. The timer is also + * cancelled in that case. * - * devcd_data_write() - * mod_delayed_work() - * try_to_grab_pending() - * del_timer() - * debug_assert_init() - * INIT_DELAYED_WORK() - * schedule_delayed_work() - * - * - * Also, mutex alone would not be enough to avoid scheduling of - * del_wk work after it get flush from a call to devcd_free() - * mentioned as below. - * - * disabled_store() - * devcd_free() - * mutex_lock() devcd_data_write() - * flush_delayed_work() - * mutex_unlock() - * mutex_lock() - * mod_delayed_work() - * mutex_unlock() - * So, delete_work flag is required. + * The second race involves multiple parallel invocations of devcd_free(); + * add a deleted flag so only one of them can call the destructor. */ struct mutex mutex; - bool delete_work; + bool init_completed, deleted; struct module *owner; ssize_t (*read)(char *buffer, loff_t offset, size_t count, void *data, size_t datalen); void (*free)(void *data); + /* + * If nothing interferes and device_add() returns success, + * del_wk will destroy the device after the timer fires. + * + * Multiple userspace processes can interfere in the working of the timer: + * - Writing to the coredump will reschedule the timer to run immediately, + * if still armed. + * + * This is handled by using "if (cancel_delayed_work()) { + * schedule_delayed_work() }", to prevent re-arming after having + * been previously fired. + * - Writing to /sys/class/devcoredump/disabled will destroy the + * coredump synchronously. + * This is handled by using disable_delayed_work_sync(), and then + * checking if the deleted flag is set with &devcd->mutex held.
+ */ struct delayed_work del_wk; struct device *failing_dev; }; @@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev) kfree(devcd); } +static void __devcd_del(struct devcd_entry *devcd) +{ + devcd->deleted = true; + device_del(&devcd->devcd_dev); + put_device(&devcd->devcd_dev); +} + static void devcd_del(struct work_struct *wk) { struct devcd_entry *devcd; + bool init_completed; devcd = container_of(wk, struct devcd_entry, del_wk.work); - device_del(&devcd->devcd_dev); - put_device(&devcd->devcd_dev); + /* devcd->mutex serializes against dev_coredumpm_timeout */ + mutex_lock(&devcd->mutex); + init_completed = devcd->init_completed; + mutex_unlock(&devcd->mutex); + + if (init_completed) + __devcd_del(devcd); } static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, @@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct devcd_entry *devcd = dev_to_devcd(dev); - mutex_lock(&devcd->mutex); - if (!devcd->delete_work) { - devcd->delete_work = true; - mod_delayed_work(system_wq, &devcd->del_wk, 0); - } - mutex_unlock(&devcd->mutex); + /* + * Although it's tempting to use mod_delayed_work() here, + * that will cause a reschedule if the timer already fired. + */ + if (cancel_delayed_work(&devcd->del_wk)) + schedule_delayed_work(&devcd->del_wk, 0); return count; } @@ -155,11 +164,21 @@ static int devcd_free(struct device *dev, void *data) { struct devcd_entry *devcd = dev_to_devcd(dev); + /* + * To prevent a race with devcd_data_write(), disable work and + * complete manually instead. + * + * We cannot rely on the return value of + * disable_delayed_work_sync() here, because it might be in the + * middle of a cancel_delayed_work + schedule_delayed_work pair. + * + * devcd->mutex here guards against multiple parallel invocations + * of devcd_free(). + */ + disable_delayed_work_sync(&devcd->del_wk); mutex_lock(&devcd->mutex); - if (!devcd->delete_work) - devcd->delete_work = true; - - flush_delayed_work(&devcd->del_wk); + if (!devcd->deleted) + __devcd_del(devcd); mutex_unlock(&devcd->mutex); return 0; } @@ -183,12 +202,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri * put_device() <- last reference * error = fn(dev, data) devcd_dev_release() * devcd_free(dev, data) kfree(devcd) - * mutex_lock(&devcd->mutex); * * - * In the above diagram, It looks like disabled_store() would be racing with parallely - * running devcd_del() and result in memory abort while acquiring devcd->mutex which - * is called after kfree of devcd memory after dropping its last reference with + * In the above diagram, it looks like disabled_store() would be racing with parallelly + * running devcd_del() and result in memory abort after dropping its last reference with * put_device(). However, this will not happen as fn(dev, data) runs * with its own reference to device via klist_node so it is not its last reference. * So, the above situation would not occur.
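The devcd_data_write() hunk above replaces mod_delayed_work() with a
cancel-then-reschedule pair. A minimal sketch of that idiom, with a
hypothetical helper name and outside the patch context:

	/*
	 * Re-arm dwork to run immediately, but only if it is still
	 * pending: cancel_delayed_work() returns true only when it
	 * dequeued a pending item, so work that has already fired (or
	 * has been disabled) is never re-armed.
	 */
	static void kick_teardown_now(struct delayed_work *dwork)
	{
		if (cancel_delayed_work(dwork))
			schedule_delayed_work(dwork, 0);
	}

mod_delayed_work(), by contrast, queues the work even when it is no
longer pending, which could resurrect a timer that devcd_free() had
already disabled.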
@@ -376,7 +393,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner, devcd->read = read; devcd->free = free; devcd->failing_dev = get_device(dev); - devcd->delete_work = false; + devcd->deleted = false; mutex_init(&devcd->mutex); device_initialize(&devcd->devcd_dev); @@ -385,8 +402,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner, atomic_inc_return(&devcd_count)); devcd->devcd_dev.class = &devcd_class; - mutex_lock(&devcd->mutex); dev_set_uevent_suppress(&devcd->devcd_dev, true); + + /* devcd->mutex prevents devcd_del() completing until init finishes */ + mutex_lock(&devcd->mutex); + devcd->init_completed = false; + INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); + schedule_delayed_work(&devcd->del_wk, timeout); + if (device_add(&devcd->devcd_dev)) goto put_device; @@ -403,13 +426,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner, dev_set_uevent_suppress(&devcd->devcd_dev, false); kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD); - INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); - schedule_delayed_work(&devcd->del_wk, timeout); + + /* + * Safe to run devcd_del() now that we are done with devcd_dev. + * Alternatively we could have taken a ref on devcd_dev before + * dropping the lock. + */ + devcd->init_completed = true; mutex_unlock(&devcd->mutex); return; put_device: - put_device(&devcd->devcd_dev); mutex_unlock(&devcd->mutex); + cancel_delayed_work_sync(&devcd->del_wk); + put_device(&devcd->devcd_dev); + put_module: module_put(owner); free: diff --git a/drivers/base/node.c b/drivers/base/node.c index eb72580288e62..deccfe68214ec 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -879,6 +879,10 @@ int __register_one_node(int nid) node_devices[nid] = node; error = register_node(node_devices[nid], nid); + if (error) { + node_devices[nid] = NULL; + return error; + } /* link cpu under this node */ for_each_present_cpu(cpu) { diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index faf4cdec23f04..abb16a5bb7967 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -628,8 +628,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy if (dev->power.syscore || dev->power.direct_complete) goto Out; - if (!dev->power.is_noirq_suspended) + if (!dev->power.is_noirq_suspended) { + /* + * This means that system suspend has been aborted in the noirq + * phase before invoking the noirq suspend callback for the + * device, so if device_suspend_late() has left it in suspend, + * device_resume_early() should also leave it in suspend, in + * case its early resume depends on the noirq resume that + * has not run. + */ + if (dev_pm_skip_suspend(dev)) + dev->power.must_resume = false; + goto Out; + } if (!dpm_wait_for_superior(dev, async)) goto Out; diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index c7ec69597a955..d8aaa5b61628b 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1554,6 +1554,32 @@ void pm_runtime_enable(struct device *dev) } EXPORT_SYMBOL_GPL(pm_runtime_enable); +static void pm_runtime_set_suspended_action(void *data) +{ + pm_runtime_set_suspended(data); +} + +/** + * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable. + * + * @dev: Device to handle.
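 * (Usage sketch, not part of the original kernel-doc; err and pdev
 *  are assumed to come from a typical probe path:
 *
 *      err = devm_pm_runtime_set_active_enabled(&pdev->dev);
 *      if (err)
 *              return err;
 *
 *  On unbind, the registered devres actions disable runtime PM and
 *  set the device back to suspended, so no explicit cleanup call is
 *  needed in the driver.)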
+ */ +int devm_pm_runtime_set_active_enabled(struct device *dev) +{ + int err; + + err = pm_runtime_set_active(dev); + if (err) + return err; + + err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev); + if (err) + return err; + + return devm_pm_runtime_enable(dev); +} +EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled); + static void pm_runtime_disable_action(void *data) { pm_runtime_dont_use_autosuspend(data); @@ -1576,6 +1602,24 @@ int devm_pm_runtime_enable(struct device *dev) } EXPORT_SYMBOL_GPL(devm_pm_runtime_enable); +static void pm_runtime_put_noidle_action(void *data) +{ + pm_runtime_put_noidle(data); +} + +/** + * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume. + * + * @dev: Device to handle. + */ +int devm_pm_runtime_get_noresume(struct device *dev) +{ + pm_runtime_get_noresume(dev); + + return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev); +} +EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume); + /** * pm_runtime_forbid - Block runtime PM of a device. * @dev: Device to handle. diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index de4e2f3db942a..66b3840bd96e3 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -828,7 +828,7 @@ struct regmap *__regmap_init(struct device *dev, map->read_flag_mask = bus->read_flag_mask; } - if (config && config->read && config->write) { + if (config->read && config->write) { map->reg_read = _regmap_bus_read; if (config->reg_update_bits) map->reg_update_bits = config->reg_update_bits; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index db9b5164cccaf..dd41171922019 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -536,8 +536,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, return -EBADF; error = loop_check_backing_file(file); - if (error) + if (error) { + fput(file); return error; + } /* suppress uevents while reconfiguring the device */ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); @@ -973,8 +975,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, return -EBADF; error = loop_check_backing_file(file); - if (error) + if (error) { + fput(file); return error; + } is_loop = is_loop_device(file); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index c705acc4d6f4b..deb298371a6a3 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -52,6 +52,7 @@ static DEFINE_IDR(nbd_index_idr); static DEFINE_MUTEX(nbd_index_mutex); static struct workqueue_struct *nbd_del_wq; +static struct cred *nbd_cred; static int nbd_total_devices = 0; struct nbd_sock { @@ -557,6 +558,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send, int result; struct msghdr msg = {} ; unsigned int noreclaim_flag; + const struct cred *old_cred; if (unlikely(!sock)) { dev_err_ratelimited(disk_to_dev(nbd->disk), @@ -565,6 +567,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send, return -EINVAL; } + old_cred = override_creds(nbd_cred); + msg.msg_iter = *iter; noreclaim_flag = memalloc_noreclaim_save(); @@ -589,6 +593,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send, memalloc_noreclaim_restore(noreclaim_flag); + revert_creds(old_cred); + return result; } @@ -1156,6 +1162,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, if (!sock) return NULL; + if (!sk_is_tcp(sock->sk) && + !sk_is_stream_unix(sock->sk)) { + dev_err(disk_to_dev(nbd->disk), 
"Unsupported socket: should be TCP or UNIX.\n"); + *err = -EINVAL; + sockfd_put(sock); + return NULL; + } + if (sock->ops->shutdown == sock_no_shutdown) { dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); *err = -EINVAL; @@ -2597,7 +2611,15 @@ static int __init nbd_init(void) return -ENOMEM; } + nbd_cred = prepare_kernel_cred(&init_task); + if (!nbd_cred) { + destroy_workqueue(nbd_del_wq); + unregister_blkdev(NBD_MAJOR, "nbd"); + return -ENOMEM; + } + if (genl_register_family(&nbd_genl_family)) { + put_cred(nbd_cred); destroy_workqueue(nbd_del_wq); unregister_blkdev(NBD_MAJOR, "nbd"); return -EINVAL; @@ -2652,6 +2674,7 @@ static void __exit nbd_cleanup(void) /* Also wait for nbd_dev_remove_work() completes */ destroy_workqueue(nbd_del_wq); + put_cred(nbd_cred); idr_destroy(&nbd_index_idr); unregister_blkdev(NBD_MAJOR, "nbd"); } diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index f10369ad90f76..ceb7aeca5d9bd 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -223,7 +223,7 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed nu static unsigned long g_cache_size; module_param_named(cache_size, g_cache_size, ulong, 0444); -MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)"); +MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)"); static bool g_fua = true; module_param_named(fua, g_fua, bool, 0444); diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 85df941afb6cf..3a4db68fc2e63 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -2734,20 +2734,32 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver) struct btintel_dsbr_cmd cmd; struct sk_buff *skb; + u32 dsbr, cnvi; u8 status; - u32 dsbr; - bool apply_dsbr; int err; - /* DSBR command needs to be sent for BlazarI + B0 step product after - * downloading IML image. + cnvi = ver->cnvi_top & 0xfff; + /* DSBR command needs to be sent for, + * 1. BlazarI or BlazarIW + B0 step product in IML image. + * 2. Gale Peak2 or BlazarU in OP image. 
*/ - apply_dsbr = (ver->img_type == BTINTEL_IMG_IML && - ((ver->cnvi_top & 0xfff) == BTINTEL_CNVI_BLAZARI) && - INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01); - if (!apply_dsbr) + switch (cnvi) { + case BTINTEL_CNVI_BLAZARI: + case BTINTEL_CNVI_BLAZARIW: + if (ver->img_type == BTINTEL_IMG_IML && + INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01) + break; + return 0; + case BTINTEL_CNVI_GAP: + case BTINTEL_CNVI_BLAZARU: + if (ver->img_type == BTINTEL_IMG_OP && + hdev->bus == HCI_USB) + break; return 0; + default: + return 0; + } dsbr = 0; err = btintel_uefi_get_dsbr(&dsbr); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index b448c67e8ed94..fa43eb1378218 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -53,6 +53,9 @@ struct intel_tlv { } __packed; #define BTINTEL_CNVI_BLAZARI 0x900 +#define BTINTEL_CNVI_BLAZARIW 0x901 +#define BTINTEL_CNVI_GAP 0x910 +#define BTINTEL_CNVI_BLAZARU 0x930 #define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */ #define BTINTEL_IMG_IML 0x02 /* Intermediate image */ diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index d7aaaeb4fe326..c07d57eaca3bf 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -514,6 +514,8 @@ static const struct usb_device_id quirks_table[] = { /* Realtek 8851BU Bluetooth devices */ { USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852AE Bluetooth devices */ { USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK | diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 4575d9a4e5ed6..dbc8d8f14ce7d 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -1103,6 +1103,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) * Get physical address of MC portal for the root DPRC: */ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!plat_res) + return -EINVAL; + mc_portal_phys_addr = plat_res->start; mc_portal_size = resource_size(plat_res); mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff; diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index b3eafcf2a2c50..cdea24e929195 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -403,17 +403,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, { struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; struct device *dev = &mhi_cntrl->mhi_dev->dev; - size_t tr_len, read_offset, write_offset; + size_t tr_len, read_offset; struct mhi_ep_buf_info buf_info = {}; u32 len = MHI_EP_DEFAULT_MTU; struct mhi_ring_element *el; - bool tr_done = false; void *buf_addr; - u32 buf_left; int ret; - buf_left = len; - do { /* Don't process the transfer ring if the channel is not in RUNNING state */ if (mhi_chan->state != MHI_CH_STATE_RUNNING) { @@ -426,24 +422,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, /* Check if there is data pending to be read from previous read operation */ if (mhi_chan->tre_bytes_left) { dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); - tr_len = min(buf_left, mhi_chan->tre_bytes_left); + tr_len = min(len, mhi_chan->tre_bytes_left); } else { mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); mhi_chan->tre_bytes_left = mhi_chan->tre_size; - tr_len = min(buf_left, mhi_chan->tre_size); + tr_len = min(len, mhi_chan->tre_size); } 
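/*
 * (Worked example, not from the patch, assuming a 32 KiB MTU for
 * illustration: a 96 KiB TRE is consumed in three passes with
 * tr_len = 32 KiB each; tre_bytes_left carries the remainder across
 * passes and rd_offset only advances once the whole TRE is read.)
 */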
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; - write_offset = len - buf_left; buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL); if (!buf_addr) return -ENOMEM; buf_info.host_addr = mhi_chan->tre_loc + read_offset; - buf_info.dev_addr = buf_addr + write_offset; + buf_info.dev_addr = buf_addr; buf_info.size = tr_len; buf_info.cb = mhi_ep_read_completion; buf_info.cb_buf = buf_addr; @@ -459,16 +454,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_buf_addr; } - buf_left -= tr_len; mhi_chan->tre_bytes_left -= tr_len; - if (!mhi_chan->tre_bytes_left) { - if (MHI_TRE_DATA_GET_IEOT(el)) - tr_done = true; - + if (!mhi_chan->tre_bytes_left) mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; - } - } while (buf_left && !tr_done); + /* Read until the transfer ring becomes empty */ + } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); return 0; @@ -502,15 +493,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring) mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); } else { /* UL channel */ - do { - ret = mhi_ep_read_channel(mhi_cntrl, ring); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); - return ret; - } - - /* Read until the ring becomes empty */ - } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); + ret = mhi_ep_read_channel(mhi_cntrl, ring); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); + return ret; + } } return 0; diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index a9b1f8beee7bc..223fc54c45b3e 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -194,7 +194,6 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) { struct mhi_event *mhi_event = mhi_cntrl->mhi_event; - struct device *dev = &mhi_cntrl->mhi_dev->dev; unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND; int i, ret; @@ -221,7 +220,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) continue; if (mhi_event->irq >= mhi_cntrl->nr_irqs) { - dev_err(dev, "irq %d not available for event ring\n", + dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n", mhi_event->irq); ret = -EINVAL; goto error_request; @@ -232,7 +231,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) irq_flags, "mhi", mhi_event); if (ret) { - dev_err(dev, "Error requesting irq:%d for ev:%d\n", + dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n", mhi_cntrl->irq[mhi_event->irq], i); goto error_request; } diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c index e55f1716cfcb2..d7bade143998e 100644 --- a/drivers/cdx/cdx_msi.c +++ b/drivers/cdx/cdx_msi.c @@ -165,7 +165,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev) struct device_node *parent_node; struct irq_domain *parent; - fwnode_handle = of_node_to_fwnode(np); + fwnode_handle = of_fwnode_handle(np); parent_node = of_parse_phandle(np, "msi-map", 1); if (!parent_node) { @@ -173,7 +173,8 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev) return NULL; } - parent = irq_find_matching_fwnode(of_node_to_fwnode(parent_node), DOMAIN_BUS_NEXUS); + parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS); + of_node_put(parent_node); if (!parent || !msi_get_domain_info(parent)) { dev_err(dev, "unable to locate ITS domain\n"); return NULL; } diff --git a/drivers/char/hw_random/Kconfig
b/drivers/char/hw_random/Kconfig index b51d9e243f351..f0dde77f50b42 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -286,6 +286,7 @@ config HW_RANDOM_INGENIC_TRNG config HW_RANDOM_NOMADIK tristate "ST-Ericsson Nomadik Random Number Generator support" depends on ARCH_NOMADIK || COMPILE_TEST + depends on ARM_AMBA default HW_RANDOM help This driver provides kernel-side support for the Random Number diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c index 36c34252b4f63..3c514b4fbc8ae 100644 --- a/drivers/char/hw_random/ks-sa-rng.c +++ b/drivers/char/hw_random/ks-sa-rng.c @@ -231,6 +231,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev) if (IS_ERR(ks_sa_rng->regmap_cfg)) return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n"); + ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(ks_sa_rng->clk)) + return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n"); + pm_runtime_enable(dev); ret = pm_runtime_resume_and_get(dev); if (ret < 0) { diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c index ecfcb50302f6c..efda90dcf5b3d 100644 --- a/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/drivers/char/ipmi/ipmi_kcs_sm.c @@ -122,10 +122,10 @@ struct si_sm_data { unsigned long error0_timeout; }; -static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs, - struct si_sm_io *io, enum kcs_states state) +static unsigned int init_kcs_data(struct si_sm_data *kcs, + struct si_sm_io *io) { - kcs->state = state; + kcs->state = KCS_IDLE; kcs->io = io; kcs->write_pos = 0; kcs->write_count = 0; @@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs, return 2; } -static unsigned int init_kcs_data(struct si_sm_data *kcs, - struct si_sm_io *io) -{ - return init_kcs_data_with_state(kcs, io, KCS_IDLE); -} - static inline unsigned char read_status(struct si_sm_data *kcs) { return kcs->io->inputb(kcs->io, 1); @@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, if (size > MAX_KCS_WRITE_SIZE) return IPMI_REQ_LEN_EXCEEDED_ERR; - if (kcs->state != KCS_IDLE) { + if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) { dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state); return IPMI_NOT_IN_MY_STATE_ERR; } @@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) } if (kcs->state == KCS_HOSED) { - init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0); + init_kcs_data(kcs, kcs->io); return SI_SM_HOSED; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 09405668ebb37..99fe013219712 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -39,7 +39,9 @@ #define IPMI_DRIVER_VERSION "39.2" -static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user); +static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, + struct ipmi_user *user); static int ipmi_init_msghandler(void); static void smi_recv_work(struct work_struct *t); static void handle_new_recv_msgs(struct ipmi_smi *intf); @@ -939,13 +941,11 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) * risk. At this moment, simply skip it in that case. 
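 * (With this series the per-user accounting moves into the allocator:
 *  ipmi_alloc_recv_msg() now takes the user refcount and increments
 *  nr_msgs, and ipmi_free_recv_msg() drops both, which is why the
 *  explicit atomic_dec() calls below are removed.)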
*/ ipmi_free_recv_msg(msg); - atomic_dec(&msg->user->nr_msgs); } else { int index; struct ipmi_user *user = acquire_ipmi_user(msg->user, &index); if (user) { - atomic_dec(&user->nr_msgs); user->handler->ipmi_recv_hndl(msg, user->handler_data); release_ipmi_user(user, index); } else { @@ -1634,8 +1634,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val) spin_unlock_irqrestore(&intf->events_lock, flags); list_for_each_entry_safe(msg, msg2, &msgs, link) { - msg->user = user; - kref_get(&user->refcount); + ipmi_set_recv_msg_user(msg, user); deliver_local_response(intf, msg); } @@ -2309,22 +2308,18 @@ static int i_ipmi_request(struct ipmi_user *user, struct ipmi_recv_msg *recv_msg; int rv = 0; - if (user) { - if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { - /* Decrement will happen at the end of the routine. */ - rv = -EBUSY; - goto out; - } - } - - if (supplied_recv) + if (supplied_recv) { recv_msg = supplied_recv; - else { - recv_msg = ipmi_alloc_recv_msg(); - if (recv_msg == NULL) { - rv = -ENOMEM; - goto out; + recv_msg->user = user; + if (user) { + atomic_inc(&user->nr_msgs); + /* The put happens when the message is freed. */ + kref_get(&user->refcount); } + } else { + recv_msg = ipmi_alloc_recv_msg(user); + if (IS_ERR(recv_msg)) + return PTR_ERR(recv_msg); } recv_msg->user_msg_data = user_msg_data; @@ -2335,8 +2330,7 @@ static int i_ipmi_request(struct ipmi_user *user, if (smi_msg == NULL) { if (!supplied_recv) ipmi_free_recv_msg(recv_msg); - rv = -ENOMEM; - goto out; + return -ENOMEM; } } @@ -2346,10 +2340,6 @@ static int i_ipmi_request(struct ipmi_user *user, goto out_err; } - recv_msg->user = user; - if (user) - /* The put happens when the message is freed. */ - kref_get(&user->refcount); recv_msg->msgid = msgid; /* * Store the message to send in the receive message so timeout @@ -2378,8 +2368,10 @@ static int i_ipmi_request(struct ipmi_user *user, if (rv) { out_err: - ipmi_free_smi_msg(smi_msg); - ipmi_free_recv_msg(recv_msg); + if (!supplied_smi) + ipmi_free_smi_msg(smi_msg); + if (!supplied_recv) + ipmi_free_recv_msg(recv_msg); } else { dev_dbg(intf->si_dev, "Send: %*ph\n", smi_msg->data_size, smi_msg->data); @@ -2388,9 +2380,6 @@ static int i_ipmi_request(struct ipmi_user *user, } rcu_read_unlock(); -out: - if (rv && user) - atomic_dec(&user->nr_msgs); return rv; } @@ -3882,7 +3871,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_ipmb_addr *ipmb_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ @@ -3903,9 +3892,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -3940,47 +3928,41 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, rv = -1; } rcu_read_unlock(); - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. 
*/ - ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; - ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; - ipmb_addr->slave_addr = msg->rsp[6]; - ipmb_addr->lun = msg->rsp[7] & 3; - ipmb_addr->channel = msg->rsp[3] & 0xf; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; + ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; + ipmb_addr->slave_addr = msg->rsp[6]; + ipmb_addr->lun = msg->rsp[7] & 3; + ipmb_addr->channel = msg->rsp[3] & 0xf; - /* - * Extract the rest of the message information - * from the IPMB header. - */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = msg->rsp[7] >> 2; - recv_msg->msg.netfn = msg->rsp[4] >> 2; - recv_msg->msg.cmd = msg->rsp[8]; - recv_msg->msg.data = recv_msg->msg_data; + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[7] >> 2; + recv_msg->msg.netfn = msg->rsp[4] >> 2; + recv_msg->msg.cmd = msg->rsp[8]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * We chop off 10, not 9 bytes because the checksum - * at the end also needs to be removed. - */ - recv_msg->msg.data_len = msg->rsp_size - 10; - memcpy(recv_msg->msg_data, &msg->rsp[9], - msg->rsp_size - 10); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * We chop off 10, not 9 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 10; + memcpy(recv_msg->msg_data, &msg->rsp[9], + msg->rsp_size - 10); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -3993,7 +3975,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, int rv = 0; struct ipmi_user *user = NULL; struct ipmi_ipmb_direct_addr *daddr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; unsigned char netfn = msg->rsp[0] >> 2; unsigned char cmd = msg->rsp[3]; @@ -4002,9 +3984,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4031,44 +4012,38 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, rv = -1; } rcu_read_unlock(); - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. */ - daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; - daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; - daddr->channel = 0; - daddr->slave_addr = msg->rsp[1]; - daddr->rs_lun = msg->rsp[0] & 3; - daddr->rq_lun = msg->rsp[2] & 3; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. 
*/ + daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; + daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; + daddr->channel = 0; + daddr->slave_addr = msg->rsp[1]; + daddr->rs_lun = msg->rsp[0] & 3; + daddr->rq_lun = msg->rsp[2] & 3; - /* - * Extract the rest of the message information - * from the IPMB header. - */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = (msg->rsp[2] >> 2); - recv_msg->msg.netfn = msg->rsp[0] >> 2; - recv_msg->msg.cmd = msg->rsp[3]; - recv_msg->msg.data = recv_msg->msg_data; - - recv_msg->msg.data_len = msg->rsp_size - 4; - memcpy(recv_msg->msg_data, msg->rsp + 4, - msg->rsp_size - 4); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = (msg->rsp[2] >> 2); + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[3]; + recv_msg->msg.data = recv_msg->msg_data; + + recv_msg->msg.data_len = msg->rsp_size - 4; + memcpy(recv_msg->msg_data, msg->rsp + 4, + msg->rsp_size - 4); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4182,7 +4157,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_lan_addr *lan_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; if (msg->rsp_size < 12) { /* Message not big enough, just ignore it. */ @@ -4203,9 +4178,8 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4217,49 +4191,44 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, * them to be freed. */ rv = 0; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. */ - lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; - lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; - lan_addr->session_handle = msg->rsp[4]; - lan_addr->remote_SWID = msg->rsp[8]; - lan_addr->local_SWID = msg->rsp[5]; - lan_addr->lun = msg->rsp[9] & 3; - lan_addr->channel = msg->rsp[3] & 0xf; - lan_addr->privilege = msg->rsp[3] >> 4; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; + lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; + lan_addr->session_handle = msg->rsp[4]; + lan_addr->remote_SWID = msg->rsp[8]; + lan_addr->local_SWID = msg->rsp[5]; + lan_addr->lun = msg->rsp[9] & 3; + lan_addr->channel = msg->rsp[3] & 0xf; + lan_addr->privilege = msg->rsp[3] >> 4; - /* - * Extract the rest of the message information - * from the IPMB header. 
- */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = msg->rsp[9] >> 2; - recv_msg->msg.netfn = msg->rsp[6] >> 2; - recv_msg->msg.cmd = msg->rsp[10]; - recv_msg->msg.data = recv_msg->msg_data; + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[9] >> 2; + recv_msg->msg.netfn = msg->rsp[6] >> 2; + recv_msg->msg.cmd = msg->rsp[10]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * We chop off 12, not 11 bytes because the checksum - * at the end also needs to be removed. - */ - recv_msg->msg.data_len = msg->rsp_size - 12; - memcpy(recv_msg->msg_data, &msg->rsp[11], - msg->rsp_size - 12); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * We chop off 12, not 11 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 12; + memcpy(recv_msg->msg_data, &msg->rsp[11], + msg->rsp_size - 12); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4281,7 +4250,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_system_interface_addr *smi_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; /* * We expect the OEM SW to perform error checking @@ -4310,9 +4279,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4325,48 +4293,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, */ rv = 0; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* - * OEM Messages are expected to be delivered via - * the system interface to SMS software. We might - * need to visit this again depending on OEM - * requirements - */ - smi_addr = ((struct ipmi_system_interface_addr *) - &recv_msg->addr); - smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; - smi_addr->channel = IPMI_BMC_CHANNEL; - smi_addr->lun = msg->rsp[0] & 3; - - recv_msg->user = user; - recv_msg->user_msg_data = NULL; - recv_msg->recv_type = IPMI_OEM_RECV_TYPE; - recv_msg->msg.netfn = msg->rsp[0] >> 2; - recv_msg->msg.cmd = msg->rsp[1]; - recv_msg->msg.data = recv_msg->msg_data; + } else if (!IS_ERR(recv_msg)) { + /* + * OEM Messages are expected to be delivered via + * the system interface to SMS software. 
We might + * need to visit this again depending on OEM + * requirements + */ + smi_addr = ((struct ipmi_system_interface_addr *) + &recv_msg->addr); + smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr->channel = IPMI_BMC_CHANNEL; + smi_addr->lun = msg->rsp[0] & 3; + + recv_msg->user_msg_data = NULL; + recv_msg->recv_type = IPMI_OEM_RECV_TYPE; + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[1]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * The message starts at byte 4 which follows the - * Channel Byte in the "GET MESSAGE" command - */ - recv_msg->msg.data_len = msg->rsp_size - 4; - memcpy(recv_msg->msg_data, &msg->rsp[4], - msg->rsp_size - 4); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * The message starts at byte 4 which follows the + * Channel Byte in the "GET MESSAGE" command + */ + recv_msg->msg.data_len = msg->rsp_size - 4; + memcpy(recv_msg->msg_data, &msg->rsp[4], + msg->rsp_size - 4); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4425,8 +4387,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, if (!user->gets_events) continue; - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { + recv_msg = ipmi_alloc_recv_msg(user); + if (IS_ERR(recv_msg)) { rcu_read_unlock(); list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { @@ -4445,8 +4407,6 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, deliver_count++; copy_event_into_recv_msg(recv_msg, msg); - recv_msg->user = user; - kref_get(&user->refcount); list_add_tail(&recv_msg->link, &msgs); } srcu_read_unlock(&intf->users_srcu, index); @@ -4462,8 +4422,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, * No one to receive the message, put it in queue if there's * not already too many things in the queue. */ - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { + recv_msg = ipmi_alloc_recv_msg(NULL); + if (IS_ERR(recv_msg)) { /* * We couldn't allocate memory for the * message, so requeue it for handling @@ -5155,27 +5115,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg) kfree(msg); } -static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user) { struct ipmi_recv_msg *rv; + if (user) { + if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { + atomic_dec(&user->nr_msgs); + return ERR_PTR(-EBUSY); + } + } + rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); - if (rv) { - rv->user = NULL; - rv->done = free_recv_msg; - atomic_inc(&recv_msg_inuse_count); + if (!rv) { + if (user) + atomic_dec(&user->nr_msgs); + return ERR_PTR(-ENOMEM); } + + rv->user = user; + rv->done = free_recv_msg; + if (user) + kref_get(&user->refcount); + atomic_inc(&recv_msg_inuse_count); return rv; } void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) { - if (msg->user && !oops_in_progress) + if (msg->user && !oops_in_progress) { + atomic_dec(&msg->user->nr_msgs); kref_put(&msg->user->refcount, free_user); + } msg->done(msg); } EXPORT_SYMBOL(ipmi_free_recv_msg); +static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, + struct ipmi_user *user) +{ + WARN_ON_ONCE(msg->user); /* User should not be set. 
*/ + msg->user = user; + atomic_inc(&user->nr_msgs); + kref_get(&user->refcount); +} + static atomic_t panic_done_count = ATOMIC_INIT(0); static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index cf0be8a7939de..db41301e63f28 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -29,7 +29,7 @@ if TCG_TPM config TCG_TPM2_HMAC bool "Use HMAC and encrypted transactions on the TPM bus" - default X86_64 + default n select CRYPTO_ECDH select CRYPTO_LIB_AESCFB select CRYPTO_LIB_SHA256 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index ed0d3d8449b30..59e992dc65c4c 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -977,8 +977,8 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, * will call disable_irq which undoes all of the above. */ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { - tpm_tis_write8(priv, original_int_vec, - TPM_INT_VECTOR(priv->locality)); + tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), + original_int_vec); rc = -1; } diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c index c173a44c800aa..629f050a855aa 100644 --- a/drivers/clk/at91/clk-peripheral.c +++ b/drivers/clk/at91/clk-peripheral.c @@ -279,8 +279,11 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw, long best_diff = LONG_MIN; u32 shift; - if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) - return parent_rate; + if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) { + req->rate = parent_rate; + + return 0; + } /* Fist step: check the available dividers. */ for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) { diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c index bb648a88e43af..ad47fdb234607 100644 --- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c +++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c @@ -103,7 +103,7 @@ static const struct mtk_gate infra_ao_clks[] = { GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28), GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29), /* INFRA_AO1 */ - GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0), + GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "top_hdmi_xtal", 0), GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1), GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2), GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4), diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c index 60990296450bb..9a12e58230bed 100644 --- a/drivers/clk/mediatek/clk-mux.c +++ b/drivers/clk/mediatek/clk-mux.c @@ -146,9 +146,7 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index) static int mtk_clk_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { - struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); - - return clk_mux_determine_rate_flags(hw, req, mux->data->flags); + return clk_mux_determine_rate_flags(hw, req, 0); } const struct clk_ops mtk_mux_clr_set_upd_ops = { diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c index 81efa885069b2..b9e204d63a972 100644 --- a/drivers/clk/nxp/clk-lpc18xx-cgu.c +++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c @@ -370,23 +370,25 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw, return 0; } -static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned 
long rate, - unsigned long *prate) +static int lpc18xx_pll0_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { unsigned long m; - if (*prate < rate) { + if (req->best_parent_rate < req->rate) { pr_warn("%s: pll dividers not supported\n", __func__); return -EINVAL; } - m = DIV_ROUND_UP_ULL(*prate, rate * 2); - if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) { - pr_warn("%s: unable to support rate %lu\n", __func__, rate); + m = DIV_ROUND_UP_ULL(req->best_parent_rate, req->rate * 2); + if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) { + pr_warn("%s: unable to support rate %lu\n", __func__, req->rate); return -EINVAL; } - return 2 * *prate * m; + req->rate = 2 * req->best_parent_rate * m; + + return 0; } static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, @@ -402,7 +404,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, } m = DIV_ROUND_UP_ULL(parent_rate, rate * 2); - if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) { + if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) { pr_warn("%s: unable to support rate %lu\n", __func__, rate); return -EINVAL; } @@ -443,7 +445,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops lpc18xx_pll0_ops = { .recalc_rate = lpc18xx_pll0_recalc_rate, - .round_rate = lpc18xx_pll0_round_rate, + .determine_rate = lpc18xx_pll0_determine_rate, .set_rate = lpc18xx_pll0_set_rate, }; diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index 33cc1f73c69d1..cab2dbb7a8d49 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -273,8 +273,8 @@ static int qcom_cc_icc_register(struct device *dev, icd[i].slave_id = desc->icc_hws[i].slave_id; hws = &desc->clks[desc->icc_hws[i].clk_id]->hw; icd[i].clk = devm_clk_hw_get_clk(dev, hws, "icc"); - if (!icd[i].clk) - return dev_err_probe(dev, -ENOENT, + if (IS_ERR(icd[i].clk)) + return dev_err_probe(dev, PTR_ERR(icd[i].clk), "(%d) clock entry is null\n", i); icd[i].name = clk_hw_get_name(hws); } diff --git a/drivers/clk/qcom/tcsrcc-x1e80100.c b/drivers/clk/qcom/tcsrcc-x1e80100.c index ff61769a08077..a367e1f55622d 100644 --- a/drivers/clk/qcom/tcsrcc-x1e80100.c +++ b/drivers/clk/qcom/tcsrcc-x1e80100.c @@ -29,6 +29,10 @@ static struct clk_branch tcsr_edp_clkref_en = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "tcsr_edp_clkref_en", + .parent_data = &(const struct clk_parent_data){ + .index = DT_BI_TCXO_PAD, + }, + .num_parents = 1, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 0f27c33192e10..112ed81f648ee 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -1012,6 +1012,7 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv, of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) { int idx; + unsigned int *new_ids; if (it.node != priv->np) continue; @@ -1022,11 +1023,13 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv, if (args[0] != CPG_MOD) continue; - ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL); - if (!ids) { + new_ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL); + if (!new_ids) { of_node_put(it.node); + kfree(ids); return -ENOMEM; } + ids = new_ids; if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) idx = MOD_CLK_PACK_10(args[1]); /* for DEF_MOD_STB() */ diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c index 7bfba0afd7783..4ec408c3a26aa 100644 --- 
a/drivers/clk/tegra/clk-bpmp.c +++ b/drivers/clk/tegra/clk-bpmp.c @@ -635,7 +635,7 @@ static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp, bpmp->num_clocks = count; - bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(struct tegra_bpmp_clk), GFP_KERNEL); + bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks), GFP_KERNEL); if (!bpmp->clocks) return -ENOMEM; diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c index e95fdc49c2269..bbceb0289d457 100644 --- a/drivers/clocksource/clps711x-timer.c +++ b/drivers/clocksource/clps711x-timer.c @@ -78,24 +78,33 @@ static int __init clps711x_timer_init(struct device_node *np) unsigned int irq = irq_of_parse_and_map(np, 0); struct clk *clock = of_clk_get(np, 0); void __iomem *base = of_iomap(np, 0); + int ret = 0; if (!base) return -ENOMEM; - if (!irq) - return -EINVAL; - if (IS_ERR(clock)) - return PTR_ERR(clock); + if (!irq) { + ret = -EINVAL; + goto unmap_io; + } + if (IS_ERR(clock)) { + ret = PTR_ERR(clock); + goto unmap_io; + } switch (of_alias_get_id(np, "timer")) { case CLPS711X_CLKSRC_CLOCKSOURCE: clps711x_clksrc_init(clock, base); break; case CLPS711X_CLKSRC_CLOCKEVENT: - return _clps711x_clkevt_init(clock, base, irq); + ret = _clps711x_clkevt_init(clock, base, irq); + break; default: - return -EINVAL; + ret = -EINVAL; + break; } - return 0; +unmap_io: + iounmap(base); + return ret; } TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init); diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c index 393966c097405..ef38e9ad98f62 100644 --- a/drivers/comedi/comedi_buf.c +++ b/drivers/comedi/comedi_buf.c @@ -368,7 +368,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s, unsigned int count = 0; const unsigned int num_sample_bytes = comedi_bytes_per_sample(s); - if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) { + if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) { async->munge_count += num_bytes; return num_bytes; } diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 8d5279c21e6cf..1abedcae50b26 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -339,6 +339,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy) return 0; } +static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu) +{ + unsigned int transition_latency_ns = cppc_get_transition_latency(cpu); + + if (transition_latency_ns == CPUFREQ_ETERNAL) + return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC; + + return transition_latency_ns / NSEC_PER_USEC; +} + /* * The PCC subspace describes the rate at which platform can accept commands * on the shared PCC channel (including READs which do not count towards freq @@ -361,12 +371,12 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) return 10000; } } - return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; + return __cppc_cpufreq_get_transition_delay_us(cpu); } #else static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) { - return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; + return __cppc_cpufreq_get_transition_delay_us(cpu); } #endif diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 983443396f8f2..30e1b64d0558f 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -110,7 +110,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) transition_latency = 
dev_pm_opp_get_max_transition_latency(cpu_dev); if (!transition_latency) - transition_latency = CPUFREQ_ETERNAL; + transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; cpumask_copy(policy->cpus, priv->cpus); policy->driver_data = priv; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index bd55c23563035..9600a96f91176 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2973,6 +2973,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) goto err_null_driver; } + /* + * Mark support for the scheduler's frequency invariance engine for + * drivers that implement target(), target_index() or fast_switch(). + */ + if (!cpufreq_driver->setpolicy) { + static_branch_enable_cpuslocked(&cpufreq_freq_invariance); + pr_debug("cpufreq: supports frequency invariance\n"); + } + ret = subsys_interface_register(&cpufreq_interface); if (ret) goto err_boost_unreg; @@ -2994,21 +3003,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) hp_online = ret; ret = 0; - /* - * Mark support for the scheduler's frequency invariance engine for - * drivers that implement target(), target_index() or fast_switch(). - */ - if (!cpufreq_driver->setpolicy) { - static_branch_enable_cpuslocked(&cpufreq_freq_invariance); - pr_debug("supports frequency invariance"); - } - pr_debug("driver %s up and running\n", driver_data->name); goto out; err_if_unreg: subsys_interface_unregister(&cpufreq_interface); err_boost_unreg: + if (!cpufreq_driver->setpolicy) + static_branch_disable_cpuslocked(&cpufreq_freq_invariance); remove_boost_sysfs_file(); err_null_driver: write_lock_irqsave(&cpufreq_driver_lock, flags); diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index c20d3ecc5a81e..33c1df7f683e4 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -443,7 +443,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) } if (of_property_read_u32(np, "clock-latency", &transition_latency)) - transition_latency = CPUFREQ_ETERNAL; + transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; /* * Calculate the ramp time for max voltage change in the diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e908710920389..d0f4f7c2ae4d9 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1582,10 +1582,10 @@ static void update_qos_request(enum freq_qos_req_type type) continue; req = policy->driver_data; - cpufreq_cpu_put(policy); - - if (!req) + if (!req) { + cpufreq_cpu_put(policy); continue; + } if (hwp_active) intel_pstate_get_hwp_cap(cpu); @@ -1601,6 +1601,8 @@ static void update_qos_request(enum freq_qos_req_type type) if (freq_qos_update_request(req, freq) < 0) pr_warn("Failed to update freq constraint: CPU%d\n", i); + + cpufreq_cpu_put(policy); } } diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index aeb5e63045421..1f1ec43aad7ca 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -238,7 +238,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000; if (!latency) - latency = CPUFREQ_ETERNAL; + latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; policy->cpuinfo.transition_latency = latency; policy->fast_switch_possible = true; diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index beb660ca240cc..bb265541671a5 100644 --- 
a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -279,7 +280,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) latency = perf_ops->transition_latency_get(ph, domain); if (!latency) - latency = CPUFREQ_ETERNAL; + latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; policy->cpuinfo.transition_latency = latency; @@ -398,6 +399,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev) return true; } + /* + * Older Broadcom STB chips had a "clocks" property for CPU node(s) + * that did not match the SCMI performance protocol node. If we get + * here, it means we have such an older Device Tree, so return + * true to preserve backwards compatibility. + */ + if (of_machine_is_compatible("brcm,brcmstb")) + return true; + return false; } diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index a191d9bdf667a..bdbc3923629e7 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -157,7 +157,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy) latency = scpi_ops->get_transition_latency(cpu_dev); if (!latency) - latency = CPUFREQ_ETERNAL; + latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; policy->cpuinfo.transition_latency = latency; diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index d8ab5b01d46d0..c032dd5d12d3c 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c @@ -183,7 +183,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev) if (of_property_read_u32(np, "clock-latency", &spear_cpufreq.transition_latency)) - spear_cpufreq.transition_latency = CPUFREQ_ETERNAL; + spear_cpufreq.transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS; cnt = of_property_count_u32_elems(np, "cpufreq_tbl"); if (cnt <= 0) { diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 7b8fcfa55038b..39186008afbfd 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -86,10 +86,14 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy, { struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); struct cpufreq_frequency_table *tbl = policy->freq_table + index; - unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset; + unsigned int edvd_offset; u32 edvd_val = tbl->driver_data; + u32 cpu; - writel(edvd_val, data->regs + edvd_offset); + for_each_cpu(cpu, policy->cpus) { + edvd_offset = data->cpus[cpu].edvd_offset; + writel(edvd_val, data->regs + edvd_offset); + } return 0; } diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c index 1fc9968eae199..b6b06a510fd86 100644 --- a/drivers/cpuidle/cpuidle-qcom-spm.c +++ b/drivers/cpuidle/cpuidle-qcom-spm.c @@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu) return -ENODEV; saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0); + of_node_put(cpu_node); if (!saw_node) return -ENODEV; pdev = of_find_device_by_node(saw_node); of_node_put(saw_node); - of_node_put(cpu_node); if (!pdev) return -ENODEV; data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL); - if (!data) + if (!data) { + put_device(&pdev->dev); return -ENOMEM; + } data->spm = dev_get_drvdata(&pdev->dev); + put_device(&pdev->dev); if (!data->spm) return -EINVAL; diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 3eb543b1644dc..a87c08f7eb686 100644 ---
a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -199,20 +199,17 @@ static unsigned int get_typical_interval(struct menu_device *data) * * This can deal with workloads that have long pauses interspersed * with sporadic activity with a bunch of short pauses. + * + * However, if the number of remaining samples is too small to exclude + * any more outliers, allow the deepest available idle state to be + * selected because there are systems where the time spent by CPUs in + * deep idle states is correlated to the maximum frequency the CPUs + * can get to. On those systems, shallow idle states should be avoided + * unless there is a clear indication that the given CPU is most likely + * going to be woken up shortly. */ - if (divisor * 4 <= INTERVALS * 3) { - /* - * If there are sufficiently many data points still under - * consideration after the outliers have been eliminated, - * returning without a prediction would be a mistake because it - * is likely that the next interval will not exceed the current - * maximum, so return the latter in that case. - */ - if (divisor >= INTERVALS / 2) - return max; - + if (divisor * 4 <= INTERVALS * 3) return UINT_MAX; - } thresh = max - 1; goto again; diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c index a72dfebc53ffc..fa201dae1f81b 100644 --- a/drivers/crypto/aspeed/aspeed-hace-crypto.c +++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c @@ -346,7 +346,7 @@ static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev) } else { dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents, - DMA_TO_DEVICE); + DMA_FROM_DEVICE); dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); } diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index dcc2380a5889f..d15b2e943447e 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -512,7 +512,7 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd) if (err && (dd->flags & TDES_FLAGS_FAST)) { dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); } return err; diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 45e130b901eb5..17eb236e9ee4d 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -888,6 +888,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm, dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); ret = PTR_ERR(qm->debug.acc_diff_regs); qm->debug.acc_diff_regs = NULL; + qm->debug.qm_diff_regs = NULL; return ret; } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 34d30b7838134..a11fe5e276773 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -690,6 +690,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) /* Config data buffer pasid needed by Kunpeng 920 */ hpre_config_pasid(qm); + hpre_open_sva_prefetch(qm); hpre_enable_clock_gate(qm); @@ -1366,8 +1367,6 @@ static int hpre_pf_probe_init(struct hpre *hpre) if (ret) return ret; - hpre_open_sva_prefetch(qm); - hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index b18692ee7fd56..a9550a05dfbd3 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3657,6 +3657,10 @@ static
ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, } pdev = container_of(dev, struct pci_dev, dev); + if (pci_physfn(pdev) != qm->pdev) { + pci_err(qm->pdev, "the pdev input does not match the pf!\n"); + return -EINVAL; + } *fun_index = pdev->devfn; @@ -4272,9 +4276,6 @@ static void qm_restart_prepare(struct hisi_qm *qm) { u32 value; - if (qm->err_ini->open_sva_prefetch) - qm->err_ini->open_sva_prefetch(qm); - if (qm->ver >= QM_HW_V3) return; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 75c25f0d5f2b8..9014cc36705ff 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -441,6 +441,45 @@ static void sec_set_endian(struct hisi_qm *qm) writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); } +static void sec_close_sva_prefetch(struct hisi_qm *qm) +{ + u32 val; + int ret; + + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) + return; + + val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); + val |= SEC_PREFETCH_DISABLE; + writel(val, qm->io_base + SEC_PREFETCH_CFG); + + ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS, + val, !(val & SEC_SVA_DISABLE_READY), + SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); + if (ret) + pci_err(qm->pdev, "failed to close sva prefetch\n"); +} + +static void sec_open_sva_prefetch(struct hisi_qm *qm) +{ + u32 val; + int ret; + + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) + return; + + /* Enable prefetch */ + val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); + val &= SEC_PREFETCH_ENABLE; + writel(val, qm->io_base + SEC_PREFETCH_CFG); + + ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG, + val, !(val & SEC_PREFETCH_DISABLE), + SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); + if (ret) + pci_err(qm->pdev, "failed to open sva prefetch\n"); +} + static void sec_engine_sva_config(struct hisi_qm *qm) { u32 reg; @@ -474,45 +513,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm) writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); } -} - -static void sec_open_sva_prefetch(struct hisi_qm *qm) -{ - u32 val; - int ret; - - if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) - return; - - /* Enable prefetch */ - val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); - val &= SEC_PREFETCH_ENABLE; - writel(val, qm->io_base + SEC_PREFETCH_CFG); - - ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG, - val, !(val & SEC_PREFETCH_DISABLE), - SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); - if (ret) - pci_err(qm->pdev, "failed to open sva prefetch\n"); -} - -static void sec_close_sva_prefetch(struct hisi_qm *qm) -{ - u32 val; - int ret; - - if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) - return; - - val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); - val |= SEC_PREFETCH_DISABLE; - writel(val, qm->io_base + SEC_PREFETCH_CFG); - - ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS, - val, !(val & SEC_SVA_DISABLE_READY), - SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); - if (ret) - pci_err(qm->pdev, "failed to close sva prefetch\n"); + sec_open_sva_prefetch(qm); } static void sec_enable_clock_gate(struct hisi_qm *qm) @@ -1094,7 +1095,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) if (ret) return ret; - sec_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); sec_debug_regs_clear(qm); ret = sec_show_last_regs_init(qm); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 80c2fcb1d26dc..323f53217f0c0 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ 
b/drivers/crypto/hisilicon/zip/zip_main.c @@ -449,10 +449,9 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg) return false; } -static int hisi_zip_set_high_perf(struct hisi_qm *qm) +static void hisi_zip_set_high_perf(struct hisi_qm *qm) { u32 val; - int ret; val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET); if (perf_mode == HZIP_HIGH_COMP_PERF) @@ -462,13 +461,6 @@ static int hisi_zip_set_high_perf(struct hisi_qm *qm) /* Set perf mode */ writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET); - ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET, - val, val == perf_mode, HZIP_DELAY_1_US, - HZIP_POLL_TIMEOUT_US); - if (ret) - pci_err(qm->pdev, "failed to set perf mode\n"); - - return ret; } static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) @@ -565,6 +557,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63); writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63); } + hisi_zip_open_sva_prefetch(qm); /* let's open all compression/decompression cores */ dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val; @@ -576,6 +569,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL); + hisi_zip_set_high_perf(qm); hisi_zip_enable_clock_gate(qm); return 0; @@ -1171,11 +1165,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) if (ret) return ret; - ret = hisi_zip_set_high_perf(qm); - if (ret) - return ret; - - hisi_zip_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm); diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c index fdeca861933cb..903f31dc663e9 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c @@ -232,7 +232,7 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req) struct device *dev = rctx->hcu_dev->dev; unsigned int remainder = 0; unsigned int total; - size_t nents; + int nents; size_t count; int rc; int i; @@ -253,6 +253,9 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req) /* Determine the number of scatter gather list entries to process. */ nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder); + if (nents < 0) + return nents; + /* If there are entries to process, map them. 
*/ if (nents) { rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents, diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 1493a373baf71..cf68b213c3e62 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -1614,7 +1614,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, return -EINVAL; } err_msg = "Invalid engine group format"; - strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1); + strscpy(tmp_buf, ctx->val.vstr); start = tmp_buf; has_se = has_ie = has_ae = false; diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 69d6019d8abcf..ba26b4db7a608 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -252,7 +252,7 @@ static void rk_hash_unprepare(struct crypto_engine *engine, void *breq) struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); struct rk_crypto_info *rkc = rctx->dev; - dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); + dma_unmap_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); } static int rk_hash_run(struct crypto_engine *engine, void *breq) diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index e2a1e4463b6f5..712df781436ce 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -116,6 +116,7 @@ struct rockchip_dfi { int buswidth[DMC_MAX_CHANNELS]; int ddrmon_stride; bool ddrmon_ctrl_single; + unsigned int count_multiplier; /* number of data clocks per count */ }; static int rockchip_dfi_enable(struct rockchip_dfi *dfi) @@ -435,7 +436,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event) switch (event->attr.config) { case PERF_EVENT_CYCLES: - count = total.c[0].clock_cycles; + count = total.c[0].clock_cycles * dfi->count_multiplier; break; case PERF_EVENT_READ_BYTES: for (i = 0; i < dfi->max_channels; i++) @@ -656,6 +657,9 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi) break; } + if (!dfi->count_multiplier) + dfi->count_multiplier = 1; + ret = perf_pmu_register(pmu, "rockchip_ddr", -1); if (ret) return ret; @@ -752,6 +756,7 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi) dfi->max_channels = 4; dfi->ddrmon_stride = 0x4000; + dfi->count_multiplier = 2; return 0; }; diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c index 7ad5225b0381d..6e608d92f7e66 100644 --- a/drivers/devfreq/mtk-cci-devfreq.c +++ b/drivers/devfreq/mtk-cci-devfreq.c @@ -386,7 +386,8 @@ static int mtk_ccifreq_probe(struct platform_device *pdev) out_free_resources: if (regulator_is_enabled(drv->proc_reg)) regulator_disable(drv->proc_reg); - if (drv->sram_reg && regulator_is_enabled(drv->sram_reg)) + if (!IS_ERR_OR_NULL(drv->sram_reg) && + regulator_is_enabled(drv->sram_reg)) regulator_disable(drv->sram_reg); return ret; diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 0e127a9109e75..2e5c30f7ba0f4 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -350,7 +350,7 @@ static long udmabuf_create(struct miscdevice *device, return -ENOMEM; INIT_LIST_HEAD(&ubuf->unpin_list); - pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; + pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; for (i = 0; i < head->count; i++) { if (!PAGE_ALIGNED(list[i].offset)) goto err; diff --git a/drivers/dma/idxd/init.c 
b/drivers/dma/idxd/init.c index 74a83203181d6..e55136bb525e2 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -923,6 +923,8 @@ static void idxd_remove(struct pci_dev *pdev) idxd_cleanup_interrupts(idxd); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); + if (device_user_pasid_enabled(idxd)) + idxd_disable_sva(idxd->pdev); pci_iounmap(pdev, idxd->reg_base); put_device(idxd_confdev(idxd)); pci_disable_device(pdev); diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 4200aec048318..70dc0ee1cc08f 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -305,6 +305,14 @@ DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 10); DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 11); +DEVICE_CHANNEL(ch12_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 12); +DEVICE_CHANNEL(ch13_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 13); +DEVICE_CHANNEL(ch14_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 14); +DEVICE_CHANNEL(ch15_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 15); /* Total possible dynamic DIMM Label attribute file table */ static struct attribute *dynamic_csrow_dimm_attr[] = { @@ -320,6 +328,10 @@ static struct attribute *dynamic_csrow_dimm_attr[] = { &dev_attr_legacy_ch9_dimm_label.attr.attr, &dev_attr_legacy_ch10_dimm_label.attr.attr, &dev_attr_legacy_ch11_dimm_label.attr.attr, + &dev_attr_legacy_ch12_dimm_label.attr.attr, + &dev_attr_legacy_ch13_dimm_label.attr.attr, + &dev_attr_legacy_ch14_dimm_label.attr.attr, + &dev_attr_legacy_ch15_dimm_label.attr.attr, NULL }; @@ -348,6 +360,14 @@ DEVICE_CHANNEL(ch10_ce_count, S_IRUGO, channel_ce_count_show, NULL, 10); DEVICE_CHANNEL(ch11_ce_count, S_IRUGO, channel_ce_count_show, NULL, 11); +DEVICE_CHANNEL(ch12_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 12); +DEVICE_CHANNEL(ch13_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 13); +DEVICE_CHANNEL(ch14_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 14); +DEVICE_CHANNEL(ch15_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 15); /* Total possible dynamic ce_count attribute file table */ static struct attribute *dynamic_csrow_ce_count_attr[] = { @@ -363,6 +383,10 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = { &dev_attr_legacy_ch9_ce_count.attr.attr, &dev_attr_legacy_ch10_ce_count.attr.attr, &dev_attr_legacy_ch11_ce_count.attr.attr, + &dev_attr_legacy_ch12_ce_count.attr.attr, + &dev_attr_legacy_ch13_ce_count.attr.attr, + &dev_attr_legacy_ch14_ce_count.attr.attr, + &dev_attr_legacy_ch15_ce_count.attr.attr, NULL }; diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index ac4b3d95531c5..d8cd12d906a72 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -967,6 +967,15 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan) return !!GET_BITFIELD(mcmtr, 2, 2); } +static bool i10nm_channel_disabled(struct skx_imc *imc, int chan) +{ + u32 mcmtr = I10NM_GET_MCMTR(imc, chan); + + edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr); + + return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18)); +} + static int i10nm_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg) { @@ -980,6 +989,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci, if (!imc->mbase) continue; + if 
(i10nm_channel_disabled(imc, i)) { + edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i); + continue; + } + ndimms = 0; if (res_cfg->type != GNR) diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index b360dca2c69e8..cc9731c3616c1 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -41,7 +41,7 @@ /* * ABI version history is documented in linux/firewire-cdev.h. */ -#define FW_CDEV_KERNEL_VERSION 5 +#define FW_CDEV_KERNEL_VERSION 6 #define FW_CDEV_VERSION_EVENT_REQUEST2 4 #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5 diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index cdec50a698a10..87acc795f7436 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -297,10 +297,28 @@ enum debug_counters { SCMI_DEBUG_COUNTERS_LAST }; -static inline void scmi_inc_count(atomic_t *arr, int stat) +/** + * struct scmi_debug_info - Debug common info + * @top_dentry: A reference to the top debugfs dentry + * @name: Name of this SCMI instance + * @type: Type of this SCMI instance + * @is_atomic: Flag to state if the transport of this instance is atomic + * @counters: An array of atomic_t's used for tracking statistics (if enabled) + */ +struct scmi_debug_info { + struct dentry *top_dentry; + const char *name; + const char *type; + bool is_atomic; + atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; +}; + +static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat) { - if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) - atomic_inc(&arr[stat]); + if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) { + if (dbg) + atomic_inc(&dbg->counters[stat]); + } } enum scmi_bad_msg { diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index f1abe605865ad..79866f3b6b3e7 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -111,22 +111,6 @@ struct scmi_protocol_instance { #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph) -/** - * struct scmi_debug_info - Debug common info - * @top_dentry: A reference to the top debugfs dentry - * @name: Name of this SCMI instance - * @type: Type of this SCMI instance - * @is_atomic: Flag to state if the transport of this instance is atomic - * @counters: An array of atomic_c's used for tracking statistics (if enabled) - */ -struct scmi_debug_info { - struct dentry *top_dentry; - const char *name; - const char *type; - bool is_atomic; - atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; -}; - /** * struct scmi_info - Structure representing a SCMI instance * @@ -787,6 +771,7 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) hash_del(&xfer->node); xfer->pending = false; } + xfer->flags = 0; hlist_add_head(&xfer->node, &minfo->free_xfers); } spin_unlock_irqrestore(&minfo->xfer_lock, flags); @@ -805,8 +790,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) { struct scmi_info *info = handle_to_scmi_info(handle); - xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW; - xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET; return __scmi_xfer_put(&info->tx_minfo, xfer); } @@ -1000,7 +983,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) spin_unlock_irqrestore(&minfo->xfer_lock, flags); scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED); - scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED); + scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED); return xfer; } @@ -1028,7 +1011,7 @@
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) msg_type, xfer_id, msg_hdr, xfer->state); scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID); - scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID); + scmi_inc_count(info->dbg, ERR_MSG_INVALID); /* On error the refcount incremented above has to be dropped */ __scmi_xfer_put(minfo, xfer); @@ -1073,7 +1056,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, PTR_ERR(xfer)); scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM); - scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM); + scmi_inc_count(info->dbg, ERR_MSG_NOMEM); scmi_clear_channel(info, cinfo); return; @@ -1089,7 +1072,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); - scmi_inc_count(info->dbg->counters, NOTIFICATION_OK); + scmi_inc_count(info->dbg, NOTIFICATION_OK); scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); @@ -1149,10 +1132,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { scmi_clear_channel(info, cinfo); complete(xfer->async_done); - scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK); + scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK); } else { complete(&xfer->done); - scmi_inc_count(info->dbg->counters, RESPONSE_OK); + scmi_inc_count(info->dbg, RESPONSE_OK); } if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { @@ -1261,7 +1244,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, "timed out in resp(caller: %pS) - polling\n", (void *)_RET_IP_); ret = -ETIMEDOUT; - scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT); + scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT); } } @@ -1286,7 +1269,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, "RESP" : "resp", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); - scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK); + scmi_inc_count(info->dbg, RESPONSE_POLLED_OK); if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { scmi_raw_message_report(info->raw, xfer, @@ -1301,7 +1284,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, dev_err(dev, "timed out in resp(caller: %pS)\n", (void *)_RET_IP_); ret = -ETIMEDOUT; - scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT); + scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT); } } @@ -1385,13 +1368,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph, !is_transport_polling_capable(info->desc)) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); - scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED); + scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED); return -EINVAL; } cinfo = idr_find(&info->tx_idr, pi->proto->id); if (unlikely(!cinfo)) { - scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND); + scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND); return -EINVAL; } /* True ONLY if also supported by transport. 
*/ @@ -1425,19 +1408,19 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ret = info->desc->ops->send_message(cinfo, xfer); if (ret < 0) { dev_dbg(dev, "Failed to send message %d\n", ret); - scmi_inc_count(info->dbg->counters, SENT_FAIL); + scmi_inc_count(info->dbg, SENT_FAIL); return ret; } trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, xfer->hdr.id, "CMND", xfer->hdr.seq, xfer->hdr.status, xfer->tx.buf, xfer->tx.len); - scmi_inc_count(info->dbg->counters, SENT_OK); + scmi_inc_count(info->dbg, SENT_OK); ret = scmi_wait_for_message_response(cinfo, xfer); if (!ret && xfer->hdr.status) { ret = scmi_to_linux_errno(xfer->hdr.status); - scmi_inc_count(info->dbg->counters, ERR_PROTOCOL); + scmi_inc_count(info->dbg, ERR_PROTOCOL); } if (info->desc->ops->mark_txdone) diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c index d349766bc0b26..f78b87f334037 100644 --- a/drivers/firmware/arm_scmi/transports/virtio.c +++ b/drivers/firmware/arm_scmi/transports/virtio.c @@ -870,6 +870,9 @@ static int scmi_vio_probe(struct virtio_device *vdev) /* Ensure initialized scmi_vdev is visible */ smp_store_mb(scmi_vdev, vdev); + /* Set device ready */ + virtio_device_ready(vdev); + ret = platform_driver_register(&scmi_virtio_driver); if (ret) { vdev->priv = NULL; diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig index f2fdd37566482..179f5d46d8ddf 100644 --- a/drivers/firmware/meson/Kconfig +++ b/drivers/firmware/meson/Kconfig @@ -5,7 +5,7 @@ config MESON_SM tristate "Amlogic Secure Monitor driver" depends on ARCH_MESON || COMPILE_TEST - default y + default ARCH_MESON depends on ARM64_4K_PAGES help Say y here to enable the Amlogic secure monitor driver diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index f25a9746249b6..3ab67aaa9e5da 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -232,11 +232,16 @@ EXPORT_SYMBOL(meson_sm_call_write); struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node) { struct platform_device *pdev = of_find_device_by_node(sm_node); + struct meson_sm_firmware *fw; if (!pdev) return NULL; - return platform_get_drvdata(pdev); + fw = platform_get_drvdata(pdev); + + put_device(&pdev->dev); + + return fw; } EXPORT_SYMBOL_GPL(meson_sm_get); diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c index f03ccd0f534cf..f60872d1e8a58 100644 --- a/drivers/gpio/gpio-104-idio-16.c +++ b/drivers/gpio/gpio-104-idio-16.c @@ -59,6 +59,7 @@ static const struct regmap_config idio_16_regmap_config = { .reg_stride = 1, .val_bits = 8, .io_port = true, + .max_register = 0x5, .wr_table = &idio_16_wr_table, .rd_table = &idio_16_rd_table, .volatile_table = &idio_16_rd_table, diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c index 2c95125892972..f7e557c2c9cd4 100644 --- a/drivers/gpio/gpio-idio-16.c +++ b/drivers/gpio/gpio-idio-16.c @@ -3,6 +3,7 @@ * GPIO library for the ACCES IDIO-16 family * Copyright (C) 2022 William Breathitt Gray */ +#include #include #include #include @@ -106,6 +107,7 @@ int devm_idio_16_regmap_register(struct device *const dev, struct idio_16_data *data; struct regmap_irq_chip *chip; struct regmap_irq_chip_data *chip_data; + DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO); if (!config->parent) return -EINVAL; @@ -163,6 +165,9 @@ int devm_idio_16_regmap_register(struct device *const dev, gpio_config.irq_domain = regmap_irq_get_domain(chip_data); 
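/*
 * Editor's note: the devm_idio_16_regmap_register() hunk continuing below
 * fills a fixed_direction_output bitmap so gpio-regmap can answer direction
 * queries without touching hardware. A minimal userspace sketch of the same
 * idea follows, assuming (as on the IDIO-16) that lines 0..15 are hard-wired
 * outputs; the names sketch_get_direction and fixed_direction_output are
 * illustrative, not kernel API.
 */
#include <stdio.h>
#include <stdint.h>

#define DIR_OUT 0
#define DIR_IN  1

/* Constant mask: a set bit means "this line is always an output". */
static const uint32_t fixed_direction_output = 0xffffu;

static int sketch_get_direction(unsigned int offset)
{
	if (offset < 16 && (fixed_direction_output & (1u << offset)))
		return DIR_OUT;
	return DIR_IN;
}

int main(void)
{
	printf("line 3: %s\n", sketch_get_direction(3) == DIR_OUT ? "out" : "in");
	printf("line 17: %s\n", sketch_get_direction(17) == DIR_OUT ? "out" : "in");
	return 0;
}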
gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate; + bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0)); + gpio_config.fixed_direction_output = fixed_direction_output; + return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config)); } EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register); diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c index c2a9b42539744..c3a595c6f6c72 100644 --- a/drivers/gpio/gpio-ljca.c +++ b/drivers/gpio/gpio-ljca.c @@ -281,22 +281,14 @@ static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data, { const struct ljca_gpio_packet *packet = evt_data; struct ljca_gpio_dev *ljca_gpio = context; - int i, irq; + int i; if (cmd != LJCA_GPIO_INT_EVENT) return; for (i = 0; i < packet->num; i++) { - irq = irq_find_mapping(ljca_gpio->gc.irq.domain, - packet->item[i].index); - if (!irq) { - dev_err(ljca_gpio->gc.parent, - "gpio_id %u does not mapped to IRQ yet\n", - packet->item[i].index); - return; - } - - generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq); + generic_handle_domain_irq(ljca_gpio->gc.irq.domain, + packet->item[i].index); set_bit(packet->item[i].index, ljca_gpio->reenable_irqs); } diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c index 44c0a21b1d1d9..5827b18d190bd 100644 --- a/drivers/gpio/gpio-pci-idio-16.c +++ b/drivers/gpio/gpio-pci-idio-16.c @@ -41,6 +41,7 @@ static const struct regmap_config idio_16_regmap_config = { .reg_stride = 1, .val_bits = 8, .io_port = true, + .max_register = 0x7, .wr_table = &idio_16_wr_table, .rd_table = &idio_16_rd_table, .volatile_table = &idio_16_rd_table, diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c index 71684dee2ca5d..fed9af5ff9ec2 100644 --- a/drivers/gpio/gpio-regmap.c +++ b/drivers/gpio/gpio-regmap.c @@ -29,6 +29,12 @@ struct gpio_regmap { unsigned int reg_clr_base; unsigned int reg_dir_in_base; unsigned int reg_dir_out_base; + unsigned long *fixed_direction_output; + +#ifdef CONFIG_REGMAP_IRQ + int regmap_irq_line; + struct regmap_irq_chip_data *irq_chip_data; +#endif int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base, unsigned int offset, unsigned int *reg, @@ -117,6 +123,13 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip, unsigned int base, val, reg, mask; int invert, ret; + if (gpio->fixed_direction_output) { + if (test_bit(offset, gpio->fixed_direction_output)) + return GPIO_LINE_DIRECTION_OUT; + else + return GPIO_LINE_DIRECTION_IN; + } + if (gpio->reg_dat_base && !gpio->reg_set_base) return GPIO_LINE_DIRECTION_IN; if (gpio->reg_set_base && !gpio->reg_dat_base) @@ -203,6 +216,7 @@ EXPORT_SYMBOL_GPL(gpio_regmap_get_drvdata); */ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config) { + struct irq_domain *irq_domain; struct gpio_regmap *gpio; struct gpio_chip *chip; int ret; @@ -274,12 +288,37 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config chip->direction_output = gpio_regmap_direction_output; } + if (config->fixed_direction_output) { + gpio->fixed_direction_output = bitmap_alloc(chip->ngpio, + GFP_KERNEL); + if (!gpio->fixed_direction_output) { + ret = -ENOMEM; + goto err_free_gpio; + } + bitmap_copy(gpio->fixed_direction_output, + config->fixed_direction_output, chip->ngpio); + } + ret = gpiochip_add_data(chip, gpio); if (ret < 0) - goto err_free_gpio; + goto err_free_bitmap; + +#ifdef CONFIG_REGMAP_IRQ + if (config->regmap_irq_chip) { + gpio->regmap_irq_line = config->regmap_irq_line; + ret = 
regmap_add_irq_chip_fwnode(dev_fwnode(config->parent), config->regmap, + config->regmap_irq_line, config->regmap_irq_flags, + 0, config->regmap_irq_chip, &gpio->irq_chip_data); + if (ret) + goto err_free_bitmap; - if (config->irq_domain) { - ret = gpiochip_irqchip_add_domain(chip, config->irq_domain); + irq_domain = regmap_irq_get_domain(gpio->irq_chip_data); + } else +#endif + irq_domain = config->irq_domain; + + if (irq_domain) { + ret = gpiochip_irqchip_add_domain(chip, irq_domain); if (ret) goto err_remove_gpiochip; } @@ -288,6 +327,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config err_remove_gpiochip: gpiochip_remove(chip); +err_free_bitmap: + bitmap_free(gpio->fixed_direction_output); err_free_gpio: kfree(gpio); return ERR_PTR(ret); @@ -300,7 +341,13 @@ EXPORT_SYMBOL_GPL(gpio_regmap_register); */ void gpio_regmap_unregister(struct gpio_regmap *gpio) { +#ifdef CONFIG_REGMAP_IRQ + if (gpio->irq_chip_data) + regmap_del_irq_chip(gpio->regmap_irq_line, gpio->irq_chip_data); +#endif + gpiochip_remove(&gpio->gpio_chip); + bitmap_free(gpio->fixed_direction_output); kfree(gpio); } EXPORT_SYMBOL_GPL(gpio_regmap_unregister); diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c index cfa7b0a50c8e3..03b16b8f639ad 100644 --- a/drivers/gpio/gpio-wcd934x.c +++ b/drivers/gpio/gpio-wcd934x.c @@ -102,7 +102,7 @@ static int wcd_gpio_probe(struct platform_device *pdev) chip->base = -1; chip->ngpio = WCD934X_NPINS; chip->label = dev_name(dev); - chip->can_sleep = false; + chip->can_sleep = true; return devm_gpiochip_add_data(dev, chip, data); } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 209871c219d69..e5d0d2b0d7989 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -4317,6 +4317,23 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode, return desc; } +static struct gpio_desc *gpiod_fwnode_lookup(struct fwnode_handle *fwnode, + struct device *consumer, + const char *con_id, + unsigned int idx, + enum gpiod_flags *flags, + unsigned long *lookupflags) +{ + struct gpio_desc *desc; + + desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, flags, lookupflags); + if (gpiod_not_found(desc) && !IS_ERR_OR_NULL(fwnode)) + desc = gpiod_find_by_fwnode(fwnode->secondary, consumer, con_id, + idx, flags, lookupflags); + + return desc; +} + struct gpio_desc *gpiod_find_and_request(struct device *consumer, struct fwnode_handle *fwnode, const char *con_id, @@ -4335,8 +4352,8 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer, int ret = 0; scoped_guard(srcu, &gpio_devices_srcu) { - desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, - &flags, &lookupflags); + desc = gpiod_fwnode_lookup(fwnode, consumer, con_id, idx, + &flags, &lookupflags); if (gpiod_not_found(desc) && platform_lookup_allowed) { /* * Either we are not using DT or ACPI, or their lookup diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index c7b18c52825d6..784651269ec55 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -83,7 +83,8 @@ amdgpu-y += \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \ sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \ - nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o + nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o 
hdp_v7_0.o nbif_v6_3_1.o \ + cyan_skillfish_reg_init.o # add DF block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 1465b3adacb0a..d349a4816e537 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2353,10 +2353,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem) int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, struct kfd_vm_fault_info *mem) { - if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { + if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) { *mem = *adev->gmc.vm_fault_info; - mb(); /* make sure read happened */ - atomic_set(&adev->gmc.vm_fault_info_updated, 0); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 6042956cd5c3c..e00b5e4542347 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1016,7 +1016,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev, /* Until a uniform way is figured, get mask based on hwid */ switch (hw_id) { case VCN_HWID: - harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; + /* VCN vs UVD+VCE */ + if (!amdgpu_ip_version(adev, VCE_HWIP, 0)) + harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; break; case DMU_HWID: if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK) @@ -2462,7 +2464,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; adev->gmc.num_umc = 4; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); @@ -2489,7 +2493,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; adev->gmc.num_umc = 4; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); @@ -2516,8 +2522,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 1; + adev->sdma.sdma_mask = 1; adev->vcn.num_vcn_inst = 1; adev->gmc.num_umc = 2; + adev->gfx.xcc_mask = 1; if (adev->apu_flags & AMD_APU_IS_RAVEN2) { adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); @@ -2560,7 +2568,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega20_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; adev->gmc.num_umc = 8; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); @@ -2588,8 +2598,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); arct_reg_base_init(adev); adev->sdma.num_instances = 8; + adev->sdma.sdma_mask = 0xff; adev->vcn.num_vcn_inst = 2; adev->gmc.num_umc = 8; + adev->gfx.xcc_mask = 1; 
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); @@ -2621,8 +2633,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); aldebaran_reg_base_init(adev); adev->sdma.num_instances = 5; + adev->sdma.sdma_mask = 0x1f; adev->vcn.num_vcn_inst = 2; adev->gmc.num_umc = 4; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); @@ -2644,6 +2658,38 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); break; + case CHIP_CYAN_SKILLFISH: + if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { + r = amdgpu_discovery_reg_base_init(adev); + if (r) + return -EINVAL; + + amdgpu_discovery_harvest_ip(adev); + amdgpu_discovery_get_gfx_info(adev); + amdgpu_discovery_get_mall_info(adev); + amdgpu_discovery_get_vcn_info(adev); + } else { + cyan_skillfish_reg_base_init(adev); + adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; + adev->gfx.xcc_mask = 1; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1); + adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3); + } + break; default: r = amdgpu_discovery_reg_base_init(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8553ac4c0ad3f..a8358d1d1acbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -2171,7 +2171,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp) } ret = psp_ta_load(psp, &psp->securedisplay_context.context); - if (!ret) { + if (!ret && !psp->securedisplay_context.context.resp_status) { psp->securedisplay_context.context.initialized = true; mutex_init(&psp->securedisplay_context.mutex); } else diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c new file mode 100644 index 0000000000000..96616a865aac7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "cyan_skillfish_ip_offset.h"
+
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
+{
+	/* HW has more IP blocks; only initialize the blocks needed by the driver */
+	uint32_t i;
+
+	adev->gfx.xcc_mask = 1;
+	for (i = 0; i < MAX_INSTANCE; ++i) {
+		adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+		adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+		adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+		adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+		adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+		adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+		adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+		adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
+		adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+		adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
+		adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+		adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+		adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+		adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+		adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+		adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 994432fb57eaf..8e2f731256504 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1055,7 +1055,7 @@ static int gmc_v7_0_sw_init(void *handle)
 				  GFP_KERNEL);
 	if (!adev->gmc.vm_fault_info)
 		return -ENOMEM;
-	atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+	atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
 
 	return 0;
 }
@@ -1287,7 +1287,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
 		vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 				     VMID);
 		if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
-			&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+			&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
 			struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
 			u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1303,8 +1303,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, info->prot_read = protections & 0x8 ? true : false; info->prot_write = protections & 0x10 ? true : false; info->prot_exec = protections & 0x20 ? true : false; - mb(); - atomic_set(&adev->gmc.vm_fault_info_updated, 1); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 1); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 86488c052f822..5248832c04adf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1168,7 +1168,7 @@ static int gmc_v8_0_sw_init(void *handle) GFP_KERNEL); if (!adev->gmc.vm_fault_info) return -ENOMEM; - atomic_set(&adev->gmc.vm_fault_info_updated, 0); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); return 0; } @@ -1468,7 +1468,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) - && !atomic_read(&adev->gmc.vm_fault_info_updated)) { + && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) { struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info; u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1484,8 +1484,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, info->prot_read = protections & 0x8 ? true : false; info->prot_write = protections & 0x10 ? true : false; info->prot_exec = protections & 0x20 ? true : false; - mb(); - atomic_set(&adev->gmc.vm_fault_info_updated, 1); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 1); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 49113df8baefd..25b175f23312c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -677,6 +677,12 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes_set_hw_res_pkt.enable_reg_active_poll = 1; mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; mes_set_hw_res_pkt.oversubscription_timer = 50; + if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f) + mes_set_hw_res_pkt.enable_lr_compute_wa = 1; + else + dev_info_once(mes->adev->dev, + "MES FW version must be >= 0x7f to enable LR compute workaround.\n"); + if (amdgpu_mes_log_enable) { mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 459f7b8d72b4d..19cbf80fa3214 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -224,7 +224,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, pipe, x_pkt->header.opcode); r = amdgpu_fence_wait_polling(ring, seq, timeout); - if (r < 1 || !*status_ptr) { + + /* + * status_ptr[31:0] == 0 (fail) or status_ptr[63:0] == 1 (success). + * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information. 
+ */ + if (r < 1 || !(lower_32_bits(*status_ptr))) { if (misc_op_str) dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n", @@ -610,6 +615,11 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) mes_set_hw_res_pkt.use_different_vmid_compute = 1; mes_set_hw_res_pkt.enable_reg_active_poll = 1; mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; + if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82) + mes_set_hw_res_pkt.enable_lr_compute_wa = 1; + else + dev_info_once(adev->dev, + "MES FW version must be >= 0x82 to enable LR compute workaround.\n"); /* * Keep oversubscribe timer for sdma . When we have unmapped doorbell diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h index 83e9782aef39d..8f4817404f10d 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.h +++ b/drivers/gpu/drm/amd/amdgpu/nv.h @@ -31,5 +31,6 @@ extern const struct amdgpu_ip_block_version nv_common_ip_block; void nv_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); void nv_set_virt_ops(struct amdgpu_device *adev); +int cyan_skillfish_reg_base_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 805d6662c88b6..21376d98ee498 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -623,7 +623,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev, * * @handle: handle used to pass amdgpu_device pointer * - * Initialize the hardware, boot up the VCPU and do some testing + * Initialize the hardware, boot up the VCPU and do some testing. + * + * On SI, the UVD is meant to be used in a specific power state, + * or alternatively the driver can manually enable its clock. + * In amdgpu we use the dedicated UVD power state when DPM is enabled. + * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state + * for the SMU and afterwards enables the UVD clock. + * This is automatically done by amdgpu_uvd_ring_begin_use when work + * is submitted to the UVD ring. Here, we have to call it manually + * in order to power up UVD before firmware validation. + * + * Note that we must not disable the UVD clock here, as that would + * cause the ring test to fail. However, UVD is powered off + * automatically after the ring test: amdgpu_uvd_ring_end_use calls + * the UVD idle work handler which will disable the UVD clock when + * all fences are signalled. */ static int uvd_v3_1_hw_init(void *handle) { @@ -633,6 +648,15 @@ static int uvd_v3_1_hw_init(void *handle) int r; uvd_v3_1_mc_resume(adev); + uvd_v3_1_enable_mgcg(adev, true); + + /* Make sure UVD is powered during FW validation. + * It's going to be automatically powered off after the ring test. 
+ */ + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + else + amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); r = uvd_v3_1_fw_validate(adev); if (r) { @@ -640,9 +664,6 @@ static int uvd_v3_1_hw_init(void *handle) return r; } - uvd_v3_1_enable_mgcg(adev, true); - amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - uvd_v3_1_start(adev); r = amdgpu_ring_test_helper(ring); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 3e9e0f36cd3f4..a8d4b3a3e77af 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -4231,7 +4231,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start, r = svm_range_get_attr(p, mm, start, size, nattrs, attrs); break; default: - r = EINVAL; + r = -EINVAL; break; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b02ff92bae0b1..ffa0d7483ffc1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2027,6 +2027,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.disable_ips_in_vpb = 0; + /* DCN35 and above supports dynamic DTBCLK switch */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0)) + init_data.flags.allow_0_dtb_clk = true; + /* Enable DWB for tested platforms only */ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) init_data.num_virtual_links = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index 2b1673d69ea83..1ab5ae9b5ea51 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration( REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0); if (data->taps.h_taps + data->taps.v_taps <= 2) { - /* Set bypass */ - - /* DCE6 has no SCL_MODE register, skip scale mode programming */ + /* Disable scaler functionality */ + REG_WRITE(SCL_SCALER_ENABLE, 0); + /* Clear registers that can cause glitches even when the scaler is off */ + REG_WRITE(SCL_TAP_CONTROL, 0); + REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0); + REG_WRITE(SCL_F_SHARP_CONTROL, 0); return false; } @@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration( SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1, SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1); - /* DCE6 has no SCL_MODE register, skip scale mode programming */ + REG_WRITE(SCL_SCALER_ENABLE, 1); /* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */ @@ -502,6 +505,8 @@ static void dce60_transform_set_scaler( REG_SET(DC_LB_MEM_SIZE, 0, DC_LB_MEM_SIZE, xfm_dce->lb_memory_size); + REG_WRITE(SCL_UPDATE, 0x00010000); + /* Clear SCL_F_SHARP_CONTROL value to 0 */ REG_WRITE(SCL_F_SHARP_CONTROL, 0); @@ -527,8 +532,7 @@ static void dce60_transform_set_scaler( if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) { /* 4. Program vertical filters */ if (xfm_dce->filter_v == NULL) - REG_SET(SCL_VERT_FILTER_CONTROL, 0, - SCL_V_2TAP_HARDCODE_COEF_EN, 0); + REG_WRITE(SCL_VERT_FILTER_CONTROL, 0); program_multi_taps_filter( xfm_dce, data->taps.v_taps, @@ -542,8 +546,7 @@ static void dce60_transform_set_scaler( /* 5. 
Program horizontal filters */ if (xfm_dce->filter_h == NULL) - REG_SET(SCL_HORZ_FILTER_CONTROL, 0, - SCL_H_2TAP_HARDCODE_COEF_EN, 0); + REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0); program_multi_taps_filter( xfm_dce, data->taps.h_taps, @@ -566,6 +569,8 @@ static void dce60_transform_set_scaler( /* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */ /* DCE6 DATA_FORMAT register does not support ALPHA_EN */ + + REG_WRITE(SCL_UPDATE, 0); } #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h index cbce194ec7b82..eb716e8337e23 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h @@ -155,6 +155,9 @@ SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \ SRI(VIEWPORT_START, SCL, id), \ SRI(VIEWPORT_SIZE, SCL, id), \ + SRI(SCL_SCALER_ENABLE, SCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \ SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \ SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \ SRI(SCL_VERT_FILTER_INIT, SCL, id), \ @@ -590,6 +593,7 @@ struct dce_transform_registers { uint32_t SCL_VERT_FILTER_SCALE_RATIO; uint32_t SCL_HORZ_FILTER_INIT; #if defined(CONFIG_DRM_AMD_DC_SI) + uint32_t SCL_SCALER_ENABLE; uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA; uint32_t SCL_HORZ_FILTER_INIT_CHROMA; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c index 9ba6cb67655f4..6c75aa82327ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c @@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs, if (dual_plane) { unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); - ; if (src->sw_mode == dm_sw_linear) ASSERT(p1_pte_row_height_linear >= 8); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 3279f347660cb..bcb296a954f2b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -287,6 +287,9 @@ void dcn401_init_hw(struct dc *dc) */ struct dc_link *link = dc->links[i]; + if (link->ep_type != DISPLAY_ENDPOINT_PHY) + continue; + link->link_enc->funcs->hw_init(link->link_enc); /* Check for enabled DIG to identify enabled display */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 41c76ba9ba569..62a39204fe0b7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -44,7 +44,13 @@ */ #define MAX_PIPES 6 #define MAX_PHANTOM_PIPES (MAX_PIPES / 2) -#define MAX_LINKS (MAX_PIPES * 2 +2) + +#define MAX_DPIA 6 +#define MAX_CONNECTOR 6 +#define MAX_VIRTUAL_LINKS 4 + +#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS) + #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h index 9de01ae574c03..067eddd9c62d8 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h @@ -4115,6 +4115,7 @@ #define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55 
#define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40 #define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41 +#define mmSCL0_SCL_SCALER_ENABLE 0x1B42 #define mmSCL0_SCL_CONTROL 0x1B44 #define mmSCL0_SCL_DEBUG 0x1B6A #define mmSCL0_SCL_DEBUG2 0x1B69 @@ -4144,6 +4145,7 @@ #define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55 #define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40 #define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41 +#define mmSCL1_SCL_SCALER_ENABLE 0x1E42 #define mmSCL1_SCL_CONTROL 0x1E44 #define mmSCL1_SCL_DEBUG 0x1E6A #define mmSCL1_SCL_DEBUG2 0x1E69 @@ -4173,6 +4175,7 @@ #define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155 #define mmSCL2_SCL_COEF_RAM_SELECT 0x4140 #define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141 +#define mmSCL2_SCL_SCALER_ENABLE 0x4142 #define mmSCL2_SCL_CONTROL 0x4144 #define mmSCL2_SCL_DEBUG 0x416A #define mmSCL2_SCL_DEBUG2 0x4169 @@ -4202,6 +4205,7 @@ #define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455 #define mmSCL3_SCL_COEF_RAM_SELECT 0x4440 #define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441 +#define mmSCL3_SCL_SCALER_ENABLE 0x4442 #define mmSCL3_SCL_CONTROL 0x4444 #define mmSCL3_SCL_DEBUG 0x446A #define mmSCL3_SCL_DEBUG2 0x4469 @@ -4231,6 +4235,7 @@ #define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755 #define mmSCL4_SCL_COEF_RAM_SELECT 0x4740 #define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741 +#define mmSCL4_SCL_SCALER_ENABLE 0x4742 #define mmSCL4_SCL_CONTROL 0x4744 #define mmSCL4_SCL_DEBUG 0x476A #define mmSCL4_SCL_DEBUG2 0x4769 @@ -4260,6 +4265,7 @@ #define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55 #define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40 #define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41 +#define mmSCL5_SCL_SCALER_ENABLE 0x4A42 #define mmSCL5_SCL_CONTROL 0x4A44 #define mmSCL5_SCL_DEBUG 0x4A6A #define mmSCL5_SCL_DEBUG2 0x4A69 @@ -4287,6 +4293,7 @@ #define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55 #define mmSCL_COEF_RAM_SELECT 0x1B40 #define mmSCL_COEF_RAM_TAP_DATA 0x1B41 +#define mmSCL_SCALER_ENABLE 0x1B42 #define mmSCL_CONTROL 0x1B44 #define mmSCL_DEBUG 0x1B6A #define mmSCL_DEBUG2 0x1B69 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h index bd8085ec54ed5..da5596fbfdcb3 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h @@ -8648,6 +8648,8 @@ #define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000 #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000 +#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L +#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000 #define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L #define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000 #define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index 21ceafce1f9b2..ab1cfc92dbeb1 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -230,13 +230,24 @@ union MESAPI_SET_HW_RESOURCES { uint32_t disable_add_queue_wptr_mc_addr : 1; uint32_t enable_mes_event_int_logging : 1; uint32_t enable_reg_active_poll : 1; - uint32_t reserved : 21; + uint32_t use_disable_queue_in_legacy_uq_preemption : 1; + uint32_t send_write_data : 1; + uint32_t os_tdr_timeout_override : 1; + uint32_t use_rs64mem_for_proc_gang_ctx : 1; + uint32_t use_add_queue_unmap_flag_addr : 1; + uint32_t enable_mes_sch_stb_log : 1; + 
uint32_t limit_single_process : 1;
+		uint32_t is_strix_tmz_wa_enabled : 1;
+		uint32_t enable_lr_compute_wa : 1;
+		uint32_t reserved : 12;
 	};
 	uint32_t uint32_t_all;
 };
 uint32_t oversubscription_timer;
 uint64_t doorbell_info;
 uint64_t event_intr_history_gpu_mc_ptr;
+	uint64_t timestamp;
+	uint32_t os_tdr_timeout_in_sec;
 };
 
 uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -256,7 +267,8 @@ union MESAPI_SET_HW_RESOURCES_1 {
 	};
 	uint64_t mes_info_ctx_mc_addr;
 	uint32_t mes_info_ctx_size;
-	uint32_t mes_kiq_unmap_timeout; // unit is 100ms
+	uint64_t reserved1;
+	uint64_t cleaner_shader_fence_mc_addr;
 };
 
 uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -563,6 +575,11 @@ enum MESAPI_MISC_OPCODE {
 	MESAPI_MISC__READ_REG,
 	MESAPI_MISC__WAIT_REG_MEM,
 	MESAPI_MISC__SET_SHADER_DEBUGGER,
+	MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE,
+	MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES,
+	MESAPI_MISC__CHANGE_CONFIG,
+	MESAPI_MISC__LAUNCH_CLEANER_SHADER,
+
 	MESAPI_MISC__MAX,
 };
 
@@ -617,6 +634,31 @@ struct SET_SHADER_DEBUGGER {
 	uint32_t trap_en;
 };
 
+enum MESAPI_MISC__CHANGE_CONFIG_OPTION {
+	MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0,
+	MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1,
+	MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2,
+
+	MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F
+};
+
+struct CHANGE_CONFIG {
+	enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode;
+	union {
+		struct {
+			uint32_t limit_single_process : 1;
+			uint32_t enable_hws_logging_buffer : 1;
+			uint32_t reserved : 31;
+		} bits;
+		uint32_t all;
+	} option;
+
+	struct {
+		uint32_t tdr_level;
+		uint32_t tdr_delay;
+	} tdr_config;
+};
+
 union MESAPI__MISC {
 	struct {
 		union MES_API_HEADER header;
@@ -631,6 +673,7 @@ union MESAPI__MISC {
 		struct WAIT_REG_MEM wait_reg_mem;
 		struct SET_SHADER_DEBUGGER set_shader_debugger;
 		enum MES_AMD_PRIORITY_LEVEL queue_sch_level;
+		struct CHANGE_CONFIG change_config;
 
 		uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS];
 	};
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index 101e2fe962c6a..a402974939d63 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -105,6 +105,43 @@ struct MES_API_STATUS {
 	uint64_t api_completion_fence_value;
 };
 
+/*
+ * MES will set api_completion_fence_value in api_completion_fence_addr
+ * when it can successfully process the API. MES will also trigger the
+ * following interrupt when it finishes processing the API, whether it
+ * succeeded or failed:
+ * Interrupt source id 181 (EOP) with context ID (DW 6 in the int
+ * cookie) set to 0xb1 and context type set to 8. The driver side needs
+ * to enable TIME_STAMP_INT_ENABLE in CPC_INT_CNTL for the MES pipe to
+ * catch this interrupt.
+ * The driver side also needs to set enable_mes_fence_int = 1 in the
+ * set_HW_resource package to enable this fence interrupt.
+ * When the API process failed:
+ * lower 32 bits set to 0.
+ * higher 32 bits set as follows (bit shift within high 32)
+ * bit 0 - 7 API specific error code.
+ * bit 8 - 15 API OPCODE.
+ * bit 16 - 23 MISC OPCODE if any + * bit 24 - 30 ERROR category (API_ERROR_XXX) + * bit 31 Set to 1 to indicate error status + * + */ +enum { MES_SCH_ERROR_CODE_HEADER_SHIFT_12 = 8 }; +enum { MES_SCH_ERROR_CODE_MISC_OP_SHIFT_12 = 16 }; +enum { MES_ERROR_CATEGORY_SHIFT_12 = 24 }; +enum { MES_API_STATUS_ERROR_SHIFT_12 = 31 }; + +enum MES_ERROR_CATEGORY_CODE_12 { + MES_ERROR_API = 1, + MES_ERROR_SCHEDULING = 2, + MES_ERROR_UNKNOWN = 3, +}; + +#define MES_ERR_CODE(api_err, opcode, misc_op, category) \ + ((uint64) (api_err | opcode << MES_SCH_ERROR_CODE_HEADER_SHIFT_12 | \ + misc_op << MES_SCH_ERROR_CODE_MISC_OP_SHIFT_12 | \ + category << MES_ERROR_CATEGORY_SHIFT_12 | \ + 1 << MES_API_STATUS_ERROR_SHIFT_12) << 32) enum { MAX_COMPUTE_PIPES = 8 }; enum { MAX_GFX_PIPES = 2 }; @@ -248,7 +285,9 @@ union MESAPI_SET_HW_RESOURCES { uint32_t enable_mes_sch_stb_log : 1; uint32_t limit_single_process : 1; uint32_t unmapped_doorbell_handling: 2; - uint32_t reserved : 11; + uint32_t enable_mes_fence_int: 1; + uint32_t enable_lr_compute_wa : 1; + uint32_t reserved : 9; }; uint32_t uint32_all; }; @@ -278,6 +317,8 @@ union MESAPI_SET_HW_RESOURCES_1 { uint32_t mes_debug_ctx_size; /* unit is 100ms */ uint32_t mes_kiq_unmap_timeout; + uint64_t reserved1; + uint64_t cleaner_shader_fence_mc_addr; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; @@ -643,6 +684,10 @@ enum MESAPI_MISC_OPCODE { MESAPI_MISC__SET_SHADER_DEBUGGER, MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE, MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES, + MESAPI_MISC__QUERY_HUNG_ENGINE_ID, + MESAPI_MISC__CHANGE_CONFIG, + MESAPI_MISC__LAUNCH_CLEANER_SHADER, + MESAPI_MISC__SETUP_MES_DBGEXT, MESAPI_MISC__MAX, }; @@ -713,6 +758,31 @@ struct SET_GANG_SUBMIT { uint32_t slave_gang_context_array_index; }; +enum MESAPI_MISC__CHANGE_CONFIG_OPTION { + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0, + MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1, + MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2, + + MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F +}; + +struct CHANGE_CONFIG { + enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode; + union { + struct { + uint32_t limit_single_process : 1; + uint32_t enable_hws_logging_buffer : 1; + uint32_t reserved : 30; + } bits; + uint32_t all; + } option; + + struct { + uint32_t tdr_level; + uint32_t tdr_delay; + } tdr_config; +}; + union MESAPI__MISC { struct { union MES_API_HEADER header; @@ -726,7 +796,7 @@ union MESAPI__MISC { struct WAIT_REG_MEM wait_reg_mem; struct SET_SHADER_DEBUGGER set_shader_debugger; enum MES_AMD_PRIORITY_LEVEL queue_sch_level; - + struct CHANGE_CONFIG change_config; uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS]; }; uint64_t timestamp; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c index 42efe838fa85c..2d2d2d5e67634 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c @@ -66,6 +66,13 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) (amdgpu_crtc->v_border * 2)); vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; + + /* we have issues with mclk switching with + * refresh rates over 120 hz on the non-DC code. 
+	 */
+	if (drm_mode_vrefresh(&amdgpu_crtc->hw_mode) > 120)
+		vblank_time_us = 0;
+
 			break;
 		}
 	}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index a5ad1b60597e6..82167eca26683 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -3066,7 +3066,13 @@ static bool si_dpm_vblank_too_short(void *handle)
 	/* we never hit the non-gddr5 limit so disable it */
 	u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
 
-	if (vblank_time < switch_limit)
+	/* Consider zero vblank time too short and disable MCLK switching.
+	 * Note that the vblank time is set to maximum when no displays are attached,
+	 * so we'll still enable MCLK switching in that case.
+	 */
+	if (vblank_time == 0)
+		return true;
+	else if (vblank_time < switch_limit)
 		return true;
 	else
 		return false;
@@ -3424,12 +3430,14 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 {
 	struct si_ps *ps = si_get_ps(rps);
 	struct amdgpu_clock_and_voltage_limits *max_limits;
+	struct amdgpu_connector *conn;
 	bool disable_mclk_switching = false;
 	bool disable_sclk_switching = false;
 	u32 mclk, sclk;
 	u16 vddc, vddci, min_vce_voltage = 0;
 	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 	u32 max_sclk = 0, max_mclk = 0;
+	u32 high_pixelclock_count = 0;
 	int i;
 
 	if (adev->asic_type == CHIP_HAINAN) {
@@ -3457,6 +3465,35 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		}
 	}
 
+	/* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz.
+	 * For example, 4K 60Hz and 1080p 144Hz fall into this category.
+	 * Find number of such displays connected.
+	 */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		if (!(adev->pm.dpm.new_active_crtcs & (1 << i)) ||
+		    !adev->mode_info.crtcs[i]->enabled)
+			continue;
+
+		conn = to_amdgpu_connector(adev->mode_info.crtcs[i]->connector);
+
+		if (conn->pixelclock_for_modeset > 297000)
+			high_pixelclock_count++;
+	}
+
+	/* These are some ad-hoc fixes to some issues observed with SI GPUs.
+	 * They are necessary because we don't have something like dce_calcs
+	 * for these GPUs to calculate bandwidth requirements.
+	 */
+	if (high_pixelclock_count) {
+		/* On Oland, we observe some flickering when two 4K 60Hz
+		 * displays are connected, possibly because voltage is too low.
+		 * Raise the voltage by requiring a higher SCLK.
+		 * (Voltage cannot be adjusted independently without also
+		 * adjusting the SCLK.)
+		 */
+		if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND)
+			disable_sclk_switching = true;
+	}
+
 	if (rps->vce_active) {
 		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
 		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
@@ -5613,14 +5650,10 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
 
 static int si_disable_ulv(struct amdgpu_device *adev)
 {
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct si_ulv_param *ulv = &si_pi->ulv;
-
-	if (ulv->supported)
-		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
-			0 : -EINVAL;
+	PPSMC_Result r;
 
-	return 0;
+	r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV);
+	return (r == PPSMC_Result_OK) ?
0 : -EINVAL; } static bool si_is_state_ulv_compatible(struct amdgpu_device *adev, @@ -5793,9 +5826,9 @@ static int si_upload_smc_data(struct amdgpu_device *adev) { struct amdgpu_crtc *amdgpu_crtc = NULL; int i; - - if (adev->pm.dpm.new_active_crtc_count == 0) - return 0; + u32 crtc_index = 0; + u32 mclk_change_block_cp_min = 0; + u32 mclk_change_block_cp_max = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) { if (adev->pm.dpm.new_active_crtcs & (1 << i)) { @@ -5804,26 +5837,31 @@ static int si_upload_smc_data(struct amdgpu_device *adev) } } - if (amdgpu_crtc == NULL) - return 0; + /* When a display is plugged in, program these so that the SMC + * performs MCLK switching when it doesn't cause flickering. + * When no display is plugged in, there is no need to restrict + * MCLK switching, so program them to zero. + */ + if (adev->pm.dpm.new_active_crtc_count && amdgpu_crtc) { + crtc_index = amdgpu_crtc->crtc_id; - if (amdgpu_crtc->line_time <= 0) - return 0; + if (amdgpu_crtc->line_time) { + mclk_change_block_cp_min = 200 / amdgpu_crtc->line_time; + mclk_change_block_cp_max = 100 / amdgpu_crtc->line_time; + } + } - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_crtc_index, - amdgpu_crtc->crtc_id) != PPSMC_Result_OK) - return 0; + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_crtc_index, + crtc_index); - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min, - amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK) - return 0; + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min, + mclk_change_block_cp_min); - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max, - amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK) - return 0; + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max, + mclk_change_block_cp_max); return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 632a25957477e..3018e294673a5 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5444,8 +5444,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; else if (hwmgr->pp_table_version == PP_TABLE_V0) - thermal_data->max = data->thermal_temp_setting.temperature_shutdown * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + thermal_data->max = data->thermal_temp_setting.temperature_shutdown; thermal_data->sw_ctf_threshold = thermal_data->max; diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 5dadc895e7f26..5e87ca2320a42 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -79,7 +79,7 @@ static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, si * 3. The Delays are often longer a lot when system resume from S3/S4. */ if (j) - mdelay(j + 1); + msleep(j + 1); /* Wait for EDID offset to show up in mirror register */ vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7); diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 3eb955333c809..66369b6a023f0 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -101,6 +101,7 @@ config DRM_ITE_IT6505 select EXTCON select CRYPTO select CRYPTO_HASH + select REGMAP_I2C help ITE IT6505 DisplayPort bridge chip driver. 
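
A quick aside on the `select REGMAP_I2C` line added to the DRM_ITE_IT6505 entry above: the it6505 bridge is programmed through a regmap layered on top of I2C, and the regmap-I2C backend is only built when REGMAP_I2C is enabled, so a config that enables the bridge without some other REGMAP_I2C user fails to link. A minimal sketch of the usual pattern follows; the example_* names and the register layout are hypothetical, not taken from the it6505 driver.

#include <linux/i2c.h>
#include <linux/regmap.h>

/* Hypothetical register map for illustration only. */
static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,		/* 8-bit register addresses */
	.val_bits = 8,		/* 8-bit register values */
	.max_register = 0xff,
};

static int example_probe(struct i2c_client *client)
{
	struct regmap *map;

	/* Only links when CONFIG_REGMAP_I2C is built, hence the Kconfig select. */
	map = devm_regmap_init_i2c(client, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* All further register access goes through regmap_read()/regmap_write(). */
	return regmap_write(map, 0x00, 0x01);
}
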
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c index c8881796fba4c..4014375f06ea1 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9211.c +++ b/drivers/gpu/drm/bridge/lontium-lt9211.c @@ -120,8 +120,7 @@ static int lt9211_read_chipid(struct lt9211 *ctx) } /* Test for known Chip ID. */ - if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE || - chipid[2] != REG_CHIPID2_VALUE) { + if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE) { dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n", chipid[0], chipid[1], chipid[2]); return -EINVAL; diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c index d41f8ae1c1483..b61ebc5bdd5ce 100644 --- a/drivers/gpu/drm/drm_draw.c +++ b/drivers/gpu/drm/drm_draw.c @@ -125,7 +125,7 @@ EXPORT_SYMBOL(drm_draw_fill16); void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch, unsigned int height, unsigned int width, - u16 color) + u32 color) { unsigned int y, x; diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h index f121ee7339dc1..20cb404e23ea6 100644 --- a/drivers/gpu/drm/drm_draw_internal.h +++ b/drivers/gpu/drm/drm_draw_internal.h @@ -47,7 +47,7 @@ void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch, void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch, unsigned int height, unsigned int width, - u16 color); + u32 color); void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch, unsigned int height, unsigned int width, diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c index f128d345b16df..0aa87eafdacd5 100644 --- a/drivers/gpu/drm/drm_panic.c +++ b/drivers/gpu/drm/drm_panic.c @@ -306,6 +306,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect, const struct font_desc *font, u32 fg_color) { + if (rect->x2 > sb->width || rect->y2 > sb->height) + return; + if (logo_mono) drm_panic_blit(sb, rect, logo_mono->data, DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color); @@ -615,7 +618,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb) pr_debug("QR width %d and scale %d\n", qr_width, scale); r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale); - v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5; + v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg); + if (v_margin < 0) + return -ENOSPC; + v_margin /= 5; drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin); r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale, diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 9eeba254cf45d..2a218d2058428 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -51,7 +51,6 @@ struct decon_context { void __iomem *regs; unsigned long irq_flags; bool i80_if; - bool suspended; wait_queue_head_t wait_vsync_queue; atomic_t wait_vsync_event; @@ -81,13 +80,30 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = { DRM_PLANE_TYPE_CURSOR, }; -static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) +/** + * decon_shadow_protect_win() - disable updating values from shadow registers at vsync + * + * @ctx: display and enhancement controller context + * @win: window to protect 
registers for + * @protect: 1 to protect (disable updates) + */ +static void decon_shadow_protect_win(struct decon_context *ctx, + unsigned int win, bool protect) { - struct decon_context *ctx = crtc->ctx; + u32 bits, val; - if (ctx->suspended) - return; + bits = SHADOWCON_WINx_PROTECT(win); + + val = readl(ctx->regs + SHADOWCON); + if (protect) + val |= bits; + else + val &= ~bits; + writel(val, ctx->regs + SHADOWCON); +} +static void decon_wait_for_vblank(struct decon_context *ctx) +{ atomic_set(&ctx->wait_vsync_event, 1); /* @@ -100,25 +116,33 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n"); } -static void decon_clear_channels(struct exynos_drm_crtc *crtc) +static void decon_clear_channels(struct decon_context *ctx) { - struct decon_context *ctx = crtc->ctx; unsigned int win, ch_enabled = 0; + u32 val; /* Check if any channel is enabled. */ for (win = 0; win < WINDOWS_NR; win++) { - u32 val = readl(ctx->regs + WINCON(win)); + val = readl(ctx->regs + WINCON(win)); if (val & WINCONx_ENWIN) { + decon_shadow_protect_win(ctx, win, true); + val &= ~WINCONx_ENWIN; writel(val, ctx->regs + WINCON(win)); ch_enabled = 1; + + decon_shadow_protect_win(ctx, win, false); } } + val = readl(ctx->regs + DECON_UPDATE); + val |= DECON_UPDATE_STANDALONE_F; + writel(val, ctx->regs + DECON_UPDATE); + /* Wait for vsync, as disable channel takes effect at next vsync */ if (ch_enabled) - decon_wait_for_vblank(ctx->crtc); + decon_wait_for_vblank(ctx); } static int decon_ctx_initialize(struct decon_context *ctx, @@ -126,7 +150,7 @@ static int decon_ctx_initialize(struct decon_context *ctx, { ctx->drm_dev = drm_dev; - decon_clear_channels(ctx->crtc); + decon_clear_channels(ctx); return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv); } @@ -155,9 +179,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc) struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; u32 val, clkdiv; - if (ctx->suspended) - return; - /* nothing to do if we haven't set the mode yet */ if (mode->htotal == 0 || mode->vtotal == 0) return; @@ -219,9 +240,6 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc) struct decon_context *ctx = crtc->ctx; u32 val; - if (ctx->suspended) - return -EPERM; - if (!test_and_set_bit(0, &ctx->irq_flags)) { val = readl(ctx->regs + VIDINTCON0); @@ -244,9 +262,6 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc) struct decon_context *ctx = crtc->ctx; u32 val; - if (ctx->suspended) - return; - if (test_and_clear_bit(0, &ctx->irq_flags)) { val = readl(ctx->regs + VIDINTCON0); @@ -343,36 +358,11 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win) writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); } -/** - * decon_shadow_protect_win() - disable updating values from shadow registers at vsync - * - * @ctx: display and enhancement controller context - * @win: window to protect registers for - * @protect: 1 to protect (disable updates) - */ -static void decon_shadow_protect_win(struct decon_context *ctx, - unsigned int win, bool protect) -{ - u32 bits, val; - - bits = SHADOWCON_WINx_PROTECT(win); - - val = readl(ctx->regs + SHADOWCON); - if (protect) - val |= bits; - else - val &= ~bits; - writel(val, ctx->regs + SHADOWCON); -} - static void decon_atomic_begin(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; int i; - if (ctx->suspended) - return; - for (i = 0; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, true); } @@ -392,9 +382,6 @@ static 
void decon_update_plane(struct exynos_drm_crtc *crtc, unsigned int cpp = fb->format->cpp[0]; unsigned int pitch = fb->pitches[0]; - if (ctx->suspended) - return; - /* * SHADOWCON/PRTCON register is used for enabling timing. * @@ -482,9 +469,6 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, unsigned int win = plane->index; u32 val; - if (ctx->suspended) - return; - /* protect windows */ decon_shadow_protect_win(ctx, win, true); @@ -503,9 +487,6 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) struct decon_context *ctx = crtc->ctx; int i; - if (ctx->suspended) - return; - for (i = 0; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, false); exynos_crtc_handle_event(crtc); @@ -533,9 +514,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc) struct decon_context *ctx = crtc->ctx; int ret; - if (!ctx->suspended) - return; - ret = pm_runtime_resume_and_get(ctx->dev); if (ret < 0) { DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n"); @@ -549,8 +527,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc) decon_enable_vblank(ctx->crtc); decon_commit(ctx->crtc); - - ctx->suspended = false; } static void decon_atomic_disable(struct exynos_drm_crtc *crtc) @@ -558,9 +534,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc) struct decon_context *ctx = crtc->ctx; int i; - if (ctx->suspended) - return; - /* * We need to make sure that all windows are disabled before we * suspend that connector. Otherwise we might try to scan from @@ -570,8 +543,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc) decon_disable_plane(crtc, &ctx->planes[i]); pm_runtime_put_sync(ctx->dev); - - ctx->suspended = true; } static const struct exynos_drm_crtc_ops decon_crtc_ops = { @@ -692,7 +663,6 @@ static int decon_probe(struct platform_device *pdev) return -ENOMEM; ctx->dev = dev; - ctx->suspended = true; i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings"); if (i80_if_timings) diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index ed8626c73541c..f0ae675581d9a 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -726,8 +726,8 @@ void oaktrail_hdmi_teardown(struct drm_device *dev) if (hdmi_dev) { pdev = hdmi_dev->dev; - pci_set_drvdata(pdev, NULL); oaktrail_hdmi_i2c_exit(pdev); + pci_set_drvdata(pdev, NULL); iounmap(hdmi_dev->regs); kfree(hdmi_dev); pci_dev_put(pdev); diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 9e05745d797d1..4d93781f2dd76 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -40,8 +40,9 @@ static u32 scale(u32 source_val, { u64 target_val; - WARN_ON(source_min > source_max); - WARN_ON(target_min > target_max); + if (WARN_ON(source_min >= source_max) || + WARN_ON(target_min > target_max)) + return target_min; /* defensive */ source_val = clamp(source_val, source_min, source_max); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 0d5197c0824a9..5cf3a516ccfb3 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -1324,9 +1324,16 @@ static int ct_receive(struct intel_guc_ct *ct) static void ct_try_receive_message(struct intel_guc_ct *ct) { + struct intel_guc *guc = ct_to_guc(ct); int ret; - if (GEM_WARN_ON(!ct->enabled)) + if (!ct->enabled) { + 
GEM_WARN_ON(!guc_to_gt(guc)->uc.reset_in_progress); + return; + } + + /* When interrupt disabled, message handling is not expected */ + if (!guc->interrupts.enabled) return; ret = ct_receive(ct); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 67fa528f546d3..8609fa38058ea 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -236,6 +236,8 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) if (ret) DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); + set_bit(GMU_STATUS_FW_START, &gmu->status); + return ret; } @@ -482,6 +484,9 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu) int ret; u32 val; + if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status)) + return 0; + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1)); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, @@ -509,6 +514,9 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) int ret; u32 val; + if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status)) + return; + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, @@ -517,6 +525,8 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); + + set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status); } static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) @@ -645,8 +655,6 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) /* ensure no writes happen before the uCode is fully written */ wmb(); - a6xx_rpmh_stop(gmu); - err: if (!IS_ERR_OR_NULL(pdcptr)) iounmap(pdcptr); @@ -799,19 +807,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) else gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); - if (state == GMU_WARM_BOOT) { - ret = a6xx_rpmh_start(gmu); - if (ret) - return ret; - } else { + ret = a6xx_rpmh_start(gmu); + if (ret) + return ret; + + if (state == GMU_COLD_BOOT) { if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], "GMU firmware is not loaded\n")) return -ENOENT; - ret = a6xx_rpmh_start(gmu); - if (ret) - return ret; - ret = a6xx_gmu_fw_load(gmu); if (ret) return ret; @@ -980,6 +984,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) /* Reset GPU core blocks */ a6xx_gpu_sw_reset(gpu, true); + + a6xx_rpmh_stop(gmu); } static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 94b6c5cab6f43..db5b3b13e7435 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -99,6 +99,12 @@ struct a6xx_gmu { struct completion pd_gate; struct qmp *qmp; + +/* To check if we can trigger sleep seq at PDC. 
Cleared in a6xx_rpmh_stop() */ +#define GMU_STATUS_FW_START 0 +/* To track if PDC sleep seq was done */ +#define GMU_STATUS_PDC_SLEEP 1 + unsigned long status; }; static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 07035ab77b792..4597fdb653588 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -445,7 +445,7 @@ static void _dpu_encoder_phys_wb_handle_wbdone_timeout( static int dpu_encoder_phys_wb_wait_for_commit_done( struct dpu_encoder_phys *phys_enc) { - unsigned long ret; + int ret; struct dpu_encoder_wait_info wait_info; struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2016c1e7242fe..fc433f39c127f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -852,7 +852,7 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, nvif_vmm_put(vmm, &old_mem->vma[1]); nvif_vmm_put(vmm, &old_mem->vma[0]); } - return 0; + return ret; } static int diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c index 5bbea734123bc..ee04c55175bb8 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c @@ -161,7 +161,7 @@ static int nt35560_set_brightness(struct backlight_device *bl) par = 0x00; ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, &par, 1); - if (ret) { + if (ret < 0) { dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret); return ret; } diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index 03eb7d52209a2..9360d486e52e5 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -1032,14 +1032,15 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data, ret = group_priority_permit(file, args->priority); if (ret) - return ret; + goto out; ret = panthor_group_create(pfile, args, queue_args); - if (ret >= 0) { - args->group_handle = ret; - ret = 0; - } + if (ret < 0) + goto out; + args->group_handle = ret; + ret = 0; +out: kvfree(queue_args); return ret; } diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 4e2d3a02ea068..cdd6e1c08cebd 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -1057,6 +1057,7 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang) } panthor_job_irq_suspend(&ptdev->fw->irq); + panthor_fw_stop(ptdev); } /** diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index b57824abeb9ee..2214dbf472fa4 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -1168,10 +1168,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx) break; case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: - /* Partial unmaps might trigger a remap with either a prev or a next VA, - * but not both. + /* Two VMAs can be needed for an unmap, as an unmap can happen + * in the middle of a drm_gpuva, requiring a remap with both + * prev & next VA. 
Or an unmap can span more than one drm_gpuva
+		 * where the first and last ones are covered partially, requiring
+		 * a remap for the first with a prev VA and a remap for the last
+		 * with a next VA.
 		 */
-		vma_count = 1;
+		vma_count = 2;
 		break;
 
 	default:
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 20135a9bc026e..0bc5b69ec636b 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -865,8 +865,7 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
 	if (IS_ERR_OR_NULL(queue))
 		return;
 
-	if (queue->entity.fence_context)
-		drm_sched_entity_destroy(&queue->entity);
+	drm_sched_entity_destroy(&queue->entity);
 
 	if (queue->scheduler.ops)
 		drm_sched_fini(&queue->scheduler);
@@ -3458,11 +3457,6 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
 	if (!group)
 		return -EINVAL;
 
-	for (u32 i = 0; i < group->queue_count; i++) {
-		if (group->queues[i])
-			drm_sched_entity_destroy(&group->queues[i]->entity);
-	}
-
 	mutex_lock(&sched->reset.lock);
 	mutex_lock(&sched->lock);
 	group->destroyed = true;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ac77d1246b945..811265648a582 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1408,7 +1408,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
 			      unsigned block_align, unsigned height_align, unsigned base_align,
 			      unsigned *l0_size, unsigned *mipmap_size)
 {
-	unsigned offset, i, level;
+	unsigned offset, i;
 	unsigned width, height, depth, size;
 	unsigned blocksize;
 	unsigned nbx, nby;
@@ -1420,7 +1420,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
 	w0 = r600_mip_minify(w0, 0);
 	h0 = r600_mip_minify(h0, 0);
 	d0 = r600_mip_minify(d0, 0);
-	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+	for (i = 0, offset = 0; i < nlevels; i++) {
 		width = r600_mip_minify(w0, i);
 		nbx = r600_fmt_get_nblocksx(format, width);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 92f4261305bd9..f2ae5d17ea601 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -576,7 +576,10 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
 	udelay(10);
 	rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
 
-	ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN;
+	rcar_mipi_dsi_clr(dsi, TXSETR, TXSETR_LANECNT_MASK);
+	rcar_mipi_dsi_set(dsi, TXSETR, dsi->lanes - 1);
+
+	ppisetr = ((BIT(dsi->lanes) - 1) & PPISETR_DLEN_MASK) | PPISETR_CLEN;
 	rcar_mipi_dsi_write(dsi, PPISETR, ppisetr);
 
 	rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
index a6b276f1d6ee1..a54c7eb4113b9 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
@@ -12,6 +12,9 @@
 #define LINKSR_LPBUSY		(1 << 1)
 #define LINKSR_HSBUSY		(1 << 0)
 
+#define TXSETR			0x100
+#define TXSETR_LANECNT_MASK	(0x3 << 0)
+
 /*
  * Video Mode Register
  */
@@ -80,10 +83,7 @@
  * PHY-Protocol Interface (PPI) Registers
  */
 #define PPISETR 0x700
-#define PPISETR_DLEN_0 (0x1 << 0)
-#define PPISETR_DLEN_1 (0x3 << 0)
-#define PPISETR_DLEN_2 (0x7 << 0)
-#define PPISETR_DLEN_3 (0xf << 0)
+#define PPISETR_DLEN_MASK (0xf << 0)
 #define PPISETR_CLEN (1 << 8)
 
 #define PPICLCR
0x710 diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 5d7df4c3b08c4..a551458ad4340 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1118,7 +1118,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, return format; if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 || - drm_rect_width(dest) < 4 || drm_rect_width(dest) < 4) { + drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) { drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n", drm_rect_width(src) >> 16, drm_rect_height(src) >> 16, drm_rect_width(dest), drm_rect_height(dest)); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 416590ea0dc3d..d5260cb1ed0ec 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -952,13 +952,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, dma_resv_assert_held(resv); dma_resv_for_each_fence(&cursor, resv, usage, fence) { - /* Make sure to grab an additional ref on the added fence */ - dma_fence_get(fence); - ret = drm_sched_job_add_dependency(job, fence); - if (ret) { - dma_fence_put(fence); + /* + * As drm_sched_job_add_dependency always consumes the fence + * reference (even when it fails), and dma_resv_for_each_fence + * is not obtaining one, we need to grab one before calling. + */ + ret = drm_sched_job_add_dependency(job, dma_fence_get(fence)); + if (ret) return ret; - } } return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index ea741bc4ac3fc..8b72848bb25cd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1515,6 +1515,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { struct vmw_bo *vmw_bo = NULL; + struct vmw_resource *res; struct vmw_surface *srf = NULL; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA); int ret; @@ -1550,18 +1551,24 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ? 
VMW_RES_DIRTY_SET : 0; - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - dirty, user_surface_converter, - &cmd->body.host.sid, NULL); + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, dirty, + user_surface_converter, &cmd->body.host.sid, + NULL); if (unlikely(ret != 0)) { if (unlikely(ret != -ERESTARTSYS)) VMW_DEBUG_USER("could not find surface for DMA.\n"); return ret; } - srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); + res = sw_context->res_cache[vmw_res_surface].res; + if (!res) { + VMW_DEBUG_USER("Invalid DMA surface.\n"); + return -EINVAL; + } - vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header); + srf = vmw_res_to_srf(res); + vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, + header); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index e7625b3f71e0e..80e8bbc3021d1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -309,8 +309,10 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx, hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key); } node->res = vmw_resource_reference_unless_doomed(res); - if (!node->res) + if (!node->res) { + hash_del_rcu(&node->hash.head); return -ESRCH; + } node->first_usage = 1; if (!res->dev_priv->has_mob) { @@ -637,7 +639,7 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx) hash_del_rcu(&val->hash.head); list_for_each_entry(val, &ctx->resource_ctx_list, head) - hash_del_rcu(&entry->hash.head); + hash_del_rcu(&val->hash.head); ctx->sw_context = NULL; } diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index cf6946424fc35..03d674e9e8075 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -41,6 +41,7 @@ #include "xe_ring_ops_types.h" #include "xe_sched_job.h" #include "xe_trace.h" +#include "xe_uc_fw.h" #include "xe_vm.h" static struct xe_guc * @@ -1285,7 +1286,17 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg) xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT)); trace_xe_exec_queue_cleanup_entity(q); - if (exec_queue_registered(q)) + /* + * Expected state transitions for cleanup: + * - If the exec queue is registered and GuC firmware is running, we must first + * disable scheduling and deregister the queue to ensure proper teardown and + * resource release in the GuC, then destroy the exec queue on driver side. + * - If the GuC is already stopped (e.g., during driver unload or GPU reset), + * we cannot expect a response for the deregister request. In this case, + * it is safe to directly destroy the exec queue on driver side, as the GuC + * will not process further requests and all resources must be cleaned up locally. 
+ */ + if (exec_queue_registered(q) && xe_uc_fw_is_running(&guc->fw)) disable_scheduling_deregister(guc, q); else __guc_exec_queue_fini(guc, q); diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c index 82750520a90a5..f14a3cc7d1173 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_group.c +++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c @@ -237,17 +237,13 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group err = q->ops->suspend_wait(q); if (err) - goto err_suspend; + return err; } if (need_resume) xe_hw_engine_group_resume_faulting_lr_jobs(group); return 0; - -err_suspend: - up_write(&group->mode_sem); - return err; } /** diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 6e7c940d7e227..71a5e852fbac7 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -277,8 +277,7 @@ static int query_mem_regions(struct xe_device *xe, mem_regions->mem_regions[0].instance = 0; mem_regions->mem_regions[0].min_page_size = PAGE_SIZE; mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT; - if (perfmon_capable()) - mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man); + mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man); mem_regions->num_mem_regions = 1; for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) { @@ -294,13 +293,11 @@ static int query_mem_regions(struct xe_device *xe, mem_regions->mem_regions[mem_regions->num_mem_regions].total_size = man->size; - if (perfmon_capable()) { - xe_ttm_vram_get_used(man, - &mem_regions->mem_regions - [mem_regions->num_mem_regions].used, - &mem_regions->mem_regions - [mem_regions->num_mem_regions].cpu_visible_used); - } + xe_ttm_vram_get_used(man, + &mem_regions->mem_regions + [mem_regions->num_mem_regions].used, + &mem_regions->mem_regions + [mem_regions->num_mem_regions].cpu_visible_used); mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size = xe_ttm_vram_get_cpu_visible_size(man); diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c index 3438d392920fa..8dae9a7766853 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c @@ -39,8 +39,12 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type) struct amdtp_hid_data *hid_data = hid->driver_data; struct amdtp_cl_data *cli_data = hid_data->cli_data; struct request_list *req_list = &cli_data->req_list; + struct amd_input_data *in_data = cli_data->in_data; + struct amd_mp2_dev *mp2; int i; + mp2 = container_of(in_data, struct amd_mp2_dev, in_data); + guard(mutex)(&mp2->lock); for (i = 0; i < cli_data->num_hid_devices; i++) { if (cli_data->hid_sensor_hubs[i] == hid) { struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL); @@ -75,6 +79,8 @@ void amd_sfh_work(struct work_struct *work) u8 report_id, node_type; u8 report_size = 0; + mp2 = container_of(in_data, struct amd_mp2_dev, in_data); + guard(mutex)(&mp2->lock); req_node = list_last_entry(&req_list->list, struct request_list, list); list_del(&req_node->list); current_index = req_node->current_index; @@ -83,7 +89,6 @@ void amd_sfh_work(struct work_struct *work) node_type = req_node->report_type; kfree(req_node); - mp2 = container_of(in_data, struct amd_mp2_dev, in_data); mp2_ops = mp2->mp2_ops; if (node_type == HID_FEATURE_REPORT) { report_size = mp2_ops->get_feat_rep(sensor_index, report_id, @@ -107,6 +112,8 @@ void amd_sfh_work(struct work_struct *work) cli_data->cur_hid_dev = 
current_index;
 	cli_data->sensor_requested_cnt[current_index] = 0;
 	amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
+	if (!list_empty(&req_list->list))
+		schedule_delayed_work(&cli_data->work, 0);
 }

 void amd_sfh_work_buffer(struct work_struct *work)
@@ -117,9 +124,10 @@ void amd_sfh_work_buffer(struct work_struct *work)
 	u8 report_size;
 	int i;

+	mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+	guard(mutex)(&mp2->lock);
 	for (i = 0; i < cli_data->num_hid_devices; i++) {
 		if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
-			mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
 			report_size = mp2->mp2_ops->get_in_rep(i, cli_data->sensor_idx[i],
 							       cli_data->report_id[i], in_data);
 			hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index e5620d7db5690..00308d8998d4d 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -10,6 +10,7 @@
 #ifndef AMD_SFH_COMMON_H
 #define AMD_SFH_COMMON_H

+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include "amd_sfh_hid.h"
@@ -57,6 +58,8 @@ struct amd_mp2_dev {
 	u32 mp2_acs;
 	struct sfh_dev_status dev_en;
 	struct work_struct work;
+	/* mp2 lock to protect data */
+	struct mutex lock;
 	u8 init_done;
 	u8 rver;
 };
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 0c28ca349bcd3..9739f66e925c0 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -405,6 +405,10 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
 	if (!privdata->cl_data)
 		return -ENOMEM;

+	rc = devm_mutex_init(&pdev->dev, &privdata->lock);
+	if (rc)
+		return rc;
+
 	privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data;
 	if (privdata->sfh1_1_ops) {
 		if (boot_cpu_data.x86 >= 0x1A)
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 6b90d2c03e889..dd989b519f86e 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -971,7 +971,10 @@ static int asus_input_mapping(struct hid_device *hdev,
 		case 0xc4: asus_map_key_clear(KEY_KBDILLUMUP);		break;
 		case 0xc5: asus_map_key_clear(KEY_KBDILLUMDOWN);	break;
 		case 0xc7: asus_map_key_clear(KEY_KBDILLUMTOGGLE);	break;
+		case 0x4e: asus_map_key_clear(KEY_FN_ESC);		break;
+		case 0x7e: asus_map_key_clear(KEY_EMOJI_PICKER);	break;
+		case 0x8b: asus_map_key_clear(KEY_PROG1);		break; /* ProArt Creator Hub key */
 		case 0x6b: asus_map_key_clear(KEY_F21);			break; /* ASUS touchpad toggle */
 		case 0x38: asus_map_key_clear(KEY_PROG1);		break; /* ROG key */
 		case 0xba: asus_map_key_clear(KEY_PROG2);		break; /* Fn+C ASUS Splendid */
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index f5c217ac4bfaa..f073d5621050a 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -622,7 +622,10 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
 		return;
 	}

-	if (value == 0 || value < dev->battery_min || value > dev->battery_max)
+	if ((usage & HID_USAGE_PAGE) == HID_UP_DIGITIZER && value == 0)
+		return;
+
+	if (value < dev->battery_min || value > dev->battery_max)
 		return;

 	capacity = hidinput_scale_battery_capacity(dev, value);
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 0f93c22a479f3..83941b916cd6b 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -814,6 +814,10 @@ static int mcp2221_raw_event(struct hid_device *hdev,
 		}
 		if (data[2] == MCP2221_I2C_READ_COMPL || data[2] ==
MCP2221_I2C_READ_PARTIAL) { + if (!mcp->rxbuf || mcp->rxbuf_idx < 0 || data[3] > 60) { + mcp->status = -EINVAL; + break; + } buf = mcp->rxbuf; memcpy(&buf[mcp->rxbuf_idx], &data[4], data[3]); mcp->rxbuf_idx = mcp->rxbuf_idx + data[3]; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 5c424010bc025..0e4cb0e668eb5 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -83,9 +83,8 @@ enum latency_mode { HID_LATENCY_HIGH = 1, }; -#define MT_IO_FLAGS_RUNNING 0 -#define MT_IO_FLAGS_ACTIVE_SLOTS 1 -#define MT_IO_FLAGS_PENDING_SLOTS 2 +#define MT_IO_SLOTS_MASK GENMASK(7, 0) /* reserve first 8 bits for slot tracking */ +#define MT_IO_FLAGS_RUNNING 32 static const bool mtrue = true; /* default for true */ static const bool mfalse; /* default for false */ @@ -160,7 +159,11 @@ struct mt_device { struct mt_class mtclass; /* our mt device class */ struct timer_list release_timer; /* to release sticky fingers */ struct hid_device *hdev; /* hid_device we're attached to */ - unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */ + unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_RUNNING) + * first 8 bits are reserved for keeping the slot + * states, this is fine because we only support up + * to 250 slots (MT_MAX_MAXCONTACT) + */ __u8 inputmode_value; /* InputMode HID feature value */ __u8 maxcontacts; bool is_buttonpad; /* is this device a button pad? */ @@ -941,6 +944,7 @@ static void mt_release_pending_palms(struct mt_device *td, for_each_set_bit(slotnum, app->pending_palm_slots, td->maxcontacts) { clear_bit(slotnum, app->pending_palm_slots); + clear_bit(slotnum, &td->mt_io_flags); input_mt_slot(input, slotnum); input_mt_report_slot_inactive(input); @@ -972,12 +976,6 @@ static void mt_sync_frame(struct mt_device *td, struct mt_application *app, app->num_received = 0; app->left_button_state = 0; - - if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags)) - set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags); - else - clear_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags); - clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags); } static int mt_compute_timestamp(struct mt_application *app, __s32 value) @@ -1152,7 +1150,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input, input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major); input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor); - set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags); + set_bit(slotnum, &td->mt_io_flags); + } else { + clear_bit(slotnum, &td->mt_io_flags); } return 0; @@ -1287,7 +1287,7 @@ static void mt_touch_report(struct hid_device *hid, * defect. 
*/ if (app->quirks & MT_QUIRK_STICKY_FINGERS) { - if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags)) + if (td->mt_io_flags & MT_IO_SLOTS_MASK) mod_timer(&td->release_timer, jiffies + msecs_to_jiffies(100)); else @@ -1663,6 +1663,7 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) case HID_CP_CONSUMER_CONTROL: case HID_GD_WIRELESS_RADIO_CTLS: case HID_GD_SYSTEM_MULTIAXIS: + case HID_DG_PEN: /* already handled by hid core */ break; case HID_DG_TOUCHSCREEN: @@ -1734,6 +1735,7 @@ static void mt_release_contacts(struct hid_device *hid) for (i = 0; i < mt->num_slots; i++) { input_mt_slot(input_dev, i); input_mt_report_slot_inactive(input_dev); + clear_bit(i, &td->mt_io_flags); } input_mt_sync_frame(input_dev); input_sync(input_dev); @@ -1756,7 +1758,7 @@ static void mt_expired_timeout(struct timer_list *t) */ if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) return; - if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags)) + if (td->mt_io_flags & MT_IO_SLOTS_MASK) mt_release_contacts(hdev); clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); } diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index c887f48756f4b..bbd6f23bce789 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -394,27 +394,15 @@ static int hidraw_revoke(struct hidraw_list *list) return 0; } -static long hidraw_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long hidraw_fixed_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, + void __user *arg) { - struct inode *inode = file_inode(file); - unsigned int minor = iminor(inode); - long ret = 0; - struct hidraw *dev; - struct hidraw_list *list = file->private_data; - void __user *user_arg = (void __user*) arg; - - down_read(&minors_rwsem); - dev = hidraw_table[minor]; - if (!dev || !dev->exist || hidraw_is_revoked(list)) { - ret = -ENODEV; - goto out; - } + struct hid_device *hid = dev->hid; switch (cmd) { case HIDIOCGRDESCSIZE: - if (put_user(dev->hid->rsize, (int __user *)arg)) - ret = -EFAULT; + if (put_user(hid->rsize, (int __user *)arg)) + return -EFAULT; break; case HIDIOCGRDESC: @@ -422,113 +410,145 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, __u32 len; if (get_user(len, (int __user *)arg)) - ret = -EFAULT; - else if (len > HID_MAX_DESCRIPTOR_SIZE - 1) - ret = -EINVAL; - else if (copy_to_user(user_arg + offsetof( - struct hidraw_report_descriptor, - value[0]), - dev->hid->rdesc, - min(dev->hid->rsize, len))) - ret = -EFAULT; + return -EFAULT; + + if (len > HID_MAX_DESCRIPTOR_SIZE - 1) + return -EINVAL; + + if (copy_to_user(arg + offsetof( + struct hidraw_report_descriptor, + value[0]), + hid->rdesc, + min(hid->rsize, len))) + return -EFAULT; + break; } case HIDIOCGRAWINFO: { struct hidraw_devinfo dinfo; - dinfo.bustype = dev->hid->bus; - dinfo.vendor = dev->hid->vendor; - dinfo.product = dev->hid->product; - if (copy_to_user(user_arg, &dinfo, sizeof(dinfo))) - ret = -EFAULT; + dinfo.bustype = hid->bus; + dinfo.vendor = hid->vendor; + dinfo.product = hid->product; + if (copy_to_user(arg, &dinfo, sizeof(dinfo))) + return -EFAULT; break; } case HIDIOCREVOKE: { - if (user_arg) - ret = -EINVAL; - else - ret = hidraw_revoke(list); - break; + struct hidraw_list *list = file->private_data; + + if (arg) + return -EINVAL; + + return hidraw_revoke(list); } default: - { - struct hid_device *hid = dev->hid; - if (_IOC_TYPE(cmd) != 'H') { - ret = -EINVAL; - break; - } + /* + * None of the above ioctls can return -EAGAIN, so + * use it as a marker 
that we need to check variable + * length ioctls. + */ + return -EAGAIN; + } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT); - break; - } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT); - break; - } + return 0; +} - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSINPUT(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT); - break; - } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGINPUT(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT); - break; - } +static long hidraw_rw_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, + void __user *user_arg) +{ + int len = _IOC_SIZE(cmd); + + switch (cmd & ~IOCSIZE_MASK) { + case HIDIOCSFEATURE(0): + return hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT); + case HIDIOCGFEATURE(0): + return hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT); + case HIDIOCSINPUT(0): + return hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT); + case HIDIOCGINPUT(0): + return hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT); + case HIDIOCSOUTPUT(0): + return hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT); + case HIDIOCGOUTPUT(0): + return hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT); + } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSOUTPUT(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT); - break; - } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGOUTPUT(0))) { - int len = _IOC_SIZE(cmd); - ret = hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT); - break; - } + return -EINVAL; +} - /* Begin Read-only ioctls. */ - if (_IOC_DIR(cmd) != _IOC_READ) { - ret = -EINVAL; - break; - } +static long hidraw_ro_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, + void __user *user_arg) +{ + struct hid_device *hid = dev->hid; + int len = _IOC_SIZE(cmd); + int field_len; + + switch (cmd & ~IOCSIZE_MASK) { + case HIDIOCGRAWNAME(0): + field_len = strlen(hid->name) + 1; + if (len > field_len) + len = field_len; + return copy_to_user(user_arg, hid->name, len) ? -EFAULT : len; + case HIDIOCGRAWPHYS(0): + field_len = strlen(hid->phys) + 1; + if (len > field_len) + len = field_len; + return copy_to_user(user_arg, hid->phys, len) ? -EFAULT : len; + case HIDIOCGRAWUNIQ(0): + field_len = strlen(hid->uniq) + 1; + if (len > field_len) + len = field_len; + return copy_to_user(user_arg, hid->uniq, len) ? -EFAULT : len; + } - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWNAME(0))) { - int len = strlen(hid->name) + 1; - if (len > _IOC_SIZE(cmd)) - len = _IOC_SIZE(cmd); - ret = copy_to_user(user_arg, hid->name, len) ? - -EFAULT : len; - break; - } + return -EINVAL; +} - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWPHYS(0))) { - int len = strlen(hid->phys) + 1; - if (len > _IOC_SIZE(cmd)) - len = _IOC_SIZE(cmd); - ret = copy_to_user(user_arg, hid->phys, len) ? 
- -EFAULT : len; - break; - } +static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct inode *inode = file_inode(file); + unsigned int minor = iminor(inode); + struct hidraw *dev; + struct hidraw_list *list = file->private_data; + void __user *user_arg = (void __user *)arg; + int ret; - if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) { - int len = strlen(hid->uniq) + 1; - if (len > _IOC_SIZE(cmd)) - len = _IOC_SIZE(cmd); - ret = copy_to_user(user_arg, hid->uniq, len) ? - -EFAULT : len; - break; - } - } + down_read(&minors_rwsem); + dev = hidraw_table[minor]; + if (!dev || !dev->exist || hidraw_is_revoked(list)) { + ret = -ENODEV; + goto out; + } + + if (_IOC_TYPE(cmd) != 'H') { + ret = -EINVAL; + goto out; + } + if (_IOC_NR(cmd) > HIDIOCTL_LAST || _IOC_NR(cmd) == 0) { ret = -ENOTTY; + goto out; } + + ret = hidraw_fixed_size_ioctl(file, dev, cmd, user_arg); + if (ret != -EAGAIN) + goto out; + + switch (_IOC_DIR(cmd)) { + case (_IOC_READ | _IOC_WRITE): + ret = hidraw_rw_variable_size_ioctl(file, dev, cmd, user_arg); + break; + case _IOC_READ: + ret = hidraw_ro_variable_size_ioctl(file, dev, cmd, user_arg); + break; + default: + /* Any other IOC_DIR is wrong */ + ret = -EINVAL; + } + out: up_read(&minors_rwsem); return ret; diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c index c25a54d5b39ad..0ba9195c9d713 100644 --- a/drivers/hwmon/mlxreg-fan.c +++ b/drivers/hwmon/mlxreg-fan.c @@ -113,8 +113,8 @@ struct mlxreg_fan { int divider; }; -static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, - unsigned long state); +static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state, bool thermal); static int mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, @@ -224,8 +224,9 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, * last thermal state. */ if (pwm->last_hwmon_state >= pwm->last_thermal_state) - return mlxreg_fan_set_cur_state(pwm->cdev, - pwm->last_hwmon_state); + return _mlxreg_fan_set_cur_state(pwm->cdev, + pwm->last_hwmon_state, + false); return 0; } return regmap_write(fan->regmap, pwm->reg, val); @@ -357,9 +358,8 @@ static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev, return 0; } -static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, - unsigned long state) - +static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state, bool thermal) { struct mlxreg_fan_pwm *pwm = cdev->devdata; struct mlxreg_fan *fan = pwm->fan; @@ -369,7 +369,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, return -EINVAL; /* Save thermal state. 
*/ - pwm->last_thermal_state = state; + if (thermal) + pwm->last_thermal_state = state; state = max_t(unsigned long, state, pwm->last_hwmon_state); err = regmap_write(fan->regmap, pwm->reg, @@ -381,6 +382,13 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, return 0; } +static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) + +{ + return _mlxreg_fan_set_cur_state(cdev, state, true); +} + static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = { .get_max_state = mlxreg_fan_get_max_state, .get_cur_state = mlxreg_fan_get_cur_state, diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c index 650b0bcc2359e..94466e28dc56f 100644 --- a/drivers/hwmon/sht3x.c +++ b/drivers/hwmon/sht3x.c @@ -294,24 +294,26 @@ static struct sht3x_data *sht3x_update_client(struct device *dev) return data; } -static int temp1_input_read(struct device *dev) +static int temp1_input_read(struct device *dev, long *temp) { struct sht3x_data *data = sht3x_update_client(dev); if (IS_ERR(data)) return PTR_ERR(data); - return data->temperature; + *temp = data->temperature; + return 0; } -static int humidity1_input_read(struct device *dev) +static int humidity1_input_read(struct device *dev, long *humidity) { struct sht3x_data *data = sht3x_update_client(dev); if (IS_ERR(data)) return PTR_ERR(data); - return data->humidity; + *humidity = data->humidity; + return 0; } /* @@ -709,6 +711,7 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { enum sht3x_limits index; + int ret; switch (type) { case hwmon_chip: @@ -723,10 +726,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type, case hwmon_temp: switch (attr) { case hwmon_temp_input: - *val = temp1_input_read(dev); - break; + return temp1_input_read(dev, val); case hwmon_temp_alarm: - *val = temp1_alarm_read(dev); + ret = temp1_alarm_read(dev); + if (ret < 0) + return ret; + *val = ret; break; case hwmon_temp_max: index = limit_max; @@ -751,10 +756,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type, case hwmon_humidity: switch (attr) { case hwmon_humidity_input: - *val = humidity1_input_read(dev); - break; + return humidity1_input_read(dev, val); case hwmon_humidity_alarm: - *val = humidity1_alarm_read(dev); + ret = humidity1_alarm_read(dev); + if (ret < 0) + return ret; + *val = ret; break; case hwmon_humidity_max: index = limit_max; diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index 25fd02955c38d..abfff42b20c93 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c @@ -521,6 +521,10 @@ static int __catu_probe(struct device *dev, struct resource *res) struct coresight_platform_data *pdata = NULL; void __iomem *base; + drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk"); + if (IS_ERR(drvdata->atclk)) + return PTR_ERR(drvdata->atclk); + catu_desc.name = coresight_alloc_device_name(&catu_devs, dev); if (!catu_desc.name) return -ENOMEM; @@ -668,18 +672,26 @@ static int catu_runtime_suspend(struct device *dev) { struct catu_drvdata *drvdata = dev_get_drvdata(dev); - if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) - clk_disable_unprepare(drvdata->pclk); + clk_disable_unprepare(drvdata->atclk); + clk_disable_unprepare(drvdata->pclk); + return 0; } static int catu_runtime_resume(struct device *dev) { struct catu_drvdata *drvdata = dev_get_drvdata(dev); + int ret; - if (drvdata 
&& !IS_ERR_OR_NULL(drvdata->pclk)) - clk_prepare_enable(drvdata->pclk); - return 0; + ret = clk_prepare_enable(drvdata->pclk); + if (ret) + return ret; + + ret = clk_prepare_enable(drvdata->atclk); + if (ret) + clk_disable_unprepare(drvdata->pclk); + + return ret; } #endif diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h index 755776cd19c5b..6e6b7aac206dc 100644 --- a/drivers/hwtracing/coresight/coresight-catu.h +++ b/drivers/hwtracing/coresight/coresight-catu.h @@ -62,6 +62,7 @@ struct catu_drvdata { struct clk *pclk; + struct clk *atclk; void __iomem *base; struct coresight_device *csdev; int irq; diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index b7941d8abbfe7..f20d4cab8f1df 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1200,8 +1200,9 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) goto out_unlock; } - if (csdev->type == CORESIGHT_DEV_TYPE_SINK || - csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { + if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && + sink_ops(csdev)->alloc_buffer) { ret = etm_perf_add_symlink_sink(csdev); if (ret) { diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index be8b46f26ddc8..7b9eaeb115d21 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -481,7 +481,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR); etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR); } - etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR); + if (drvdata->numextinsel) + etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR); for (i = 0; i < drvdata->nr_cntr; i++) { etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i)); etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i)); @@ -1362,6 +1363,7 @@ static void etm4_init_arch_data(void *info) etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5); /* NUMEXTIN, bits[8:0] number of external inputs implemented */ drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5); + drvdata->numextinsel = FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5); /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */ drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5); /* ATBTRIG, bit[22] implementation can support ATB triggers? 
*/ @@ -1789,7 +1791,9 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR); state->trcseqstr = etm4x_read32(csa, TRCSEQSTR); } - state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR); + + if (drvdata->numextinsel) + state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR); for (i = 0; i < drvdata->nr_cntr; i++) { state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i)); @@ -1921,7 +1925,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata) etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR); etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR); } - etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR); + if (drvdata->numextinsel) + etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR); for (i = 0; i < drvdata->nr_cntr; i++) { etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i)); @@ -2152,6 +2157,10 @@ static int etm4_probe(struct device *dev) if (WARN_ON(!drvdata)) return -ENOMEM; + drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk"); + if (IS_ERR(drvdata->atclk)) + return PTR_ERR(drvdata->atclk); + if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE) pm_save_enable = coresight_loses_context_with_cpu(dev) ? PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER; @@ -2400,8 +2409,8 @@ static int etm4_runtime_suspend(struct device *dev) { struct etmv4_drvdata *drvdata = dev_get_drvdata(dev); - if (drvdata->pclk && !IS_ERR(drvdata->pclk)) - clk_disable_unprepare(drvdata->pclk); + clk_disable_unprepare(drvdata->atclk); + clk_disable_unprepare(drvdata->pclk); return 0; } @@ -2409,11 +2418,17 @@ static int etm4_runtime_suspend(struct device *dev) static int etm4_runtime_resume(struct device *dev) { struct etmv4_drvdata *drvdata = dev_get_drvdata(dev); + int ret; - if (drvdata->pclk && !IS_ERR(drvdata->pclk)) - clk_prepare_enable(drvdata->pclk); + ret = clk_prepare_enable(drvdata->pclk); + if (ret) + return ret; - return 0; + ret = clk_prepare_enable(drvdata->atclk); + if (ret) + clk_disable_unprepare(drvdata->pclk); + + return ret; } #endif diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 9e9165f62e81f..3683966bd0603 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -162,6 +162,7 @@ #define TRCIDR4_NUMVMIDC_MASK GENMASK(31, 28) #define TRCIDR5_NUMEXTIN_MASK GENMASK(8, 0) +#define TRCIDR5_NUMEXTINSEL_MASK GENMASK(11, 9) #define TRCIDR5_TRACEIDSIZE_MASK GENMASK(21, 16) #define TRCIDR5_ATBTRIG BIT(22) #define TRCIDR5_LPOVERRIDE BIT(23) @@ -919,7 +920,8 @@ struct etmv4_save_state { /** * struct etm4_drvdata - specifics associated to an ETM component - * @pclk APB clock if present, otherwise NULL + * @pclk: APB clock if present, otherwise NULL + * @atclk: Optional clock for the core parts of the ETMv4. * @base: Memory mapped base address for this component. * @csdev: Component vitals needed by the framework. * @spinlock: Only one at a time pls. 
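Aside (editor's illustration, not part of the patch): the TRCIDR5 decoding above relies on the GENMASK()/FIELD_GET() helpers from <linux/bits.h> and <linux/bitfield.h>. The sketch below shows the decode step in isolation; the function name and the sample register value are invented for illustration, while the mask definitions match the header changes above.

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define TRCIDR5_NUMEXTIN_MASK	GENMASK(8, 0)
    #define TRCIDR5_NUMEXTINSEL_MASK	GENMASK(11, 9)

    /* Decode TRCIDR5 the way etm4_init_arch_data() does. */
    static u8 example_numextinsel(u32 etmidr5)
    {
    	/* FIELD_GET() masks then shifts, e.g. etmidr5 = 0x600 yields 3 */
    	return FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5);
    }

Caching the result in drvdata->numextinsel is what lets the enable, save and restore paths above skip TRCEXTINSELR accesses when the implementation provides no external input selectors.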
@@ -987,6 +989,7 @@ struct etmv4_save_state { */ struct etmv4_drvdata { struct clk *pclk; + struct clk *atclk; void __iomem *base; struct coresight_device *csdev; spinlock_t spinlock; @@ -998,6 +1001,7 @@ struct etmv4_drvdata { u8 nr_cntr; u8 nr_ext_inp; u8 numcidc; + u8 numextinsel; u8 numvmidc; u8 nrseqstate; u8 nr_event; diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c index 475fa4bb6813b..96ef2517dd439 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-core.c +++ b/drivers/hwtracing/coresight/coresight-tmc-core.c @@ -480,6 +480,10 @@ static int __tmc_probe(struct device *dev, struct resource *res) struct coresight_desc desc = { 0 }; struct coresight_dev_list *dev_list = NULL; + drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk"); + if (IS_ERR(drvdata->atclk)) + return PTR_ERR(drvdata->atclk); + ret = -ENOMEM; /* Validity for the resource is already checked by the AMBA core */ @@ -700,18 +704,26 @@ static int tmc_runtime_suspend(struct device *dev) { struct tmc_drvdata *drvdata = dev_get_drvdata(dev); - if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) - clk_disable_unprepare(drvdata->pclk); + clk_disable_unprepare(drvdata->atclk); + clk_disable_unprepare(drvdata->pclk); + return 0; } static int tmc_runtime_resume(struct device *dev) { struct tmc_drvdata *drvdata = dev_get_drvdata(dev); + int ret; - if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) - clk_prepare_enable(drvdata->pclk); - return 0; + ret = clk_prepare_enable(drvdata->pclk); + if (ret) + return ret; + + ret = clk_prepare_enable(drvdata->atclk); + if (ret) + clk_disable_unprepare(drvdata->pclk); + + return ret; } #endif diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h index 2671926be62a3..2a53acbb5990b 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.h +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -166,6 +166,7 @@ struct etr_buf { /** * struct tmc_drvdata - specifics associated to an TMC component + * @atclk: optional clock for the core parts of the TMC. * @pclk: APB clock if present, otherwise NULL * @base: memory mapped base address for this component. * @csdev: component vitals needed by the framework. @@ -191,6 +192,7 @@ struct etr_buf { * @perf_buf: PERF buffer for ETR. 
*/ struct tmc_drvdata { + struct clk *atclk; struct clk *pclk; void __iomem *base; struct coresight_device *csdev; diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c index bfca103f9f847..865fd6273e5e4 100644 --- a/drivers/hwtracing/coresight/coresight-tpda.c +++ b/drivers/hwtracing/coresight/coresight-tpda.c @@ -71,12 +71,15 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata, if (tpdm_has_dsb_dataset(tpdm_data)) { rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent), "qcom,dsb-element-bits", &drvdata->dsb_esize); + if (rc) + goto out; } if (tpdm_has_cmb_dataset(tpdm_data)) { rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent), "qcom,cmb-element-bits", &drvdata->cmb_esize); } +out: if (rc) dev_warn_once(&csdev->dev, "Failed to read TPDM Element size: %d\n", rc); diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index 96a32b2136699..d771980a278dc 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -22,7 +22,8 @@ #include "coresight-self-hosted-trace.h" #include "coresight-trbe.h" -#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT)) +#define PERF_IDX2OFF(idx, buf) \ + ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT)) /* * A padding packet that will help the user space tools @@ -744,12 +745,12 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev, buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event)); if (!buf) - return ERR_PTR(-ENOMEM); + return NULL; pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL); if (!pglist) { kfree(buf); - return ERR_PTR(-ENOMEM); + return NULL; } for (i = 0; i < nr_pages; i++) @@ -759,7 +760,7 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev, if (!buf->trbe_base) { kfree(pglist); kfree(buf); - return ERR_PTR(-ENOMEM); + return NULL; } buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE; buf->trbe_write = buf->trbe_base; @@ -1266,7 +1267,7 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp * into the device for that purpose. 
*/ desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL); - if (IS_ERR(desc.pdata)) + if (!desc.pdata) goto cpu_clear; desc.type = CORESIGHT_DEV_TYPE_SINK; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index a3e86930bf418..24c0ada72f6a5 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -101,7 +101,7 @@ static int bt1_i2c_request_regs(struct dw_i2c_dev *dev) } #endif -static int txgbe_i2c_request_regs(struct dw_i2c_dev *dev) +static int dw_i2c_get_parent_regmap(struct dw_i2c_dev *dev) { dev->map = dev_get_regmap(dev->dev->parent, NULL); if (!dev->map) @@ -123,12 +123,15 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev) struct platform_device *pdev = to_platform_device(dev->dev); int ret; + if (device_is_compatible(dev->dev, "intel,xe-i2c")) + return dw_i2c_get_parent_regmap(dev); + switch (dev->flags & MODEL_MASK) { case MODEL_BAIKAL_BT1: ret = bt1_i2c_request_regs(dev); break; case MODEL_WANGXUN_SP: - ret = txgbe_i2c_request_regs(dev); + ret = dw_i2c_get_parent_regmap(dev); break; default: dev->base = devm_platform_ioremap_resource(pdev, 0); @@ -308,6 +311,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) exit_probe: dw_i2c_plat_pm_cleanup(dev); + i2c_dw_prepare_clk(dev, false); exit_reset: reset_control_assert(dev->rst); return ret; @@ -325,9 +329,11 @@ static void dw_i2c_plat_remove(struct platform_device *pdev) i2c_dw_disable(dev); pm_runtime_dont_use_autosuspend(device); - pm_runtime_put_sync(device); + pm_runtime_put_noidle(device); dw_i2c_plat_pm_cleanup(dev); + i2c_dw_prepare_clk(dev, false); + i2c_dw_remove_lock_support(dev); reset_control_assert(dev->rst); diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index e0ba653dec2d6..8beef73960c74 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -1243,6 +1243,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap, { int ret; int left_num = num; + bool write_then_read_en = false; struct mtk_i2c *i2c = i2c_get_adapdata(adap); ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks); @@ -1256,6 +1257,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap, if (!(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD) && msgs[0].addr == msgs[1].addr) { i2c->auto_restart = 0; + write_then_read_en = true; } } @@ -1280,12 +1282,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap, else i2c->op = I2C_MASTER_WR; - if (!i2c->auto_restart) { - if (num > 1) { - /* combined two messages into one transaction */ - i2c->op = I2C_MASTER_WRRD; - left_num--; - } + if (write_then_read_en) { + /* combined two messages into one transaction */ + i2c->op = I2C_MASTER_WRRD; + left_num--; } /* always use DMA mode. 
*/
@@ -1293,7 +1293,10 @@
 		if (ret < 0)
 			goto err_exit;

-		msgs++;
+		if (i2c->op == I2C_MASTER_WRRD)
+			msgs += 2;
+		else
+			msgs++;
 	}
 	/* the return value is number of executed messages */
 	ret = num;
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 474a96ebda226..a1945bf9ef19e 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -377,6 +377,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
 				SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
 	if (ret) {
 		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
+		i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
 		return ret;
 	}

@@ -438,9 +439,24 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
 	 */
 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

-	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
-	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
-	       SVC_I3C_MCTRL_IBIRESP_AUTO,
+	/*
+	 * Write a REQUEST_START_ADDR request to emit the broadcast address
+	 * for arbitration, instead of using AUTO_IBI.
+	 *
+	 * Using an AutoIBI request may cause the controller to remain in the
+	 * AutoIBI state when there is a glitch on the SDA line (high->low->high).
+	 * 1. SDA high->low, raising an interrupt to execute the IBI isr.
+	 * 2. SDA low->high.
+	 * 3. The IBI isr writes an AutoIBI request.
+	 * 4. The controller will not start the AutoIBI process because SDA is not low.
+	 * 5. IBIWON polling times out.
+	 * 6. The controller remains in the AutoIBI state and doesn't accept the
+	 *    EmitStop request.
+	 */
+	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+	       SVC_I3C_MCTRL_TYPE_I3C |
+	       SVC_I3C_MCTRL_IBIRESP_MANUAL |
+	       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) |
+	       SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR),
 	       master->regs + SVC_I3C_MCTRL);

 	/* Wait for IBIWON, should take approximately 100us */
@@ -460,10 +476,15 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
 	switch (ibitype) {
 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
 		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
-		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
+		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) {
 			svc_i3c_master_nack_ibi(master);
-		else
+		} else {
+			if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+				svc_i3c_master_ack_ibi(master, true);
+			else
+				svc_i3c_master_ack_ibi(master, false);
 			svc_i3c_master_handle_ibi(master, dev);
+		}
 		break;
 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
 		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
index c3f9fa307b84c..2db16c36d8144 100644
--- a/drivers/iio/adc/pac1934.c
+++ b/drivers/iio/adc/pac1934.c
@@ -88,6 +88,7 @@
 #define PAC1934_VPOWER_3_ADDR		0x19
 #define PAC1934_VPOWER_4_ADDR		0x1A
 #define PAC1934_REFRESH_V_REG_ADDR	0x1F
+#define PAC1934_SLOW_REG_ADDR		0x20
 #define PAC1934_CTRL_STAT_REGS_ADDR	0x1C
 #define PAC1934_PID_REG_ADDR		0xFD
 #define PAC1934_MID_REG_ADDR		0xFE
@@ -1265,8 +1266,23 @@ static int pac1934_chip_configure(struct pac1934_chip_info *info)
 	/* no SLOW triggered REFRESH, clear POR */
 	regs[PAC1934_SLOW_REG_OFF] = 0;

-	ret = i2c_smbus_write_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
-					 ARRAY_SIZE(regs), (u8 *)regs);
+	/*
+	 * Write the three bytes sequentially, as the device does not support
+	 * block write.
+ */ + ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_STAT_REGS_ADDR, + regs[PAC1934_CHANNEL_DIS_REG_OFF]); + if (ret) + return ret; + + ret = i2c_smbus_write_byte_data(client, + PAC1934_CTRL_STAT_REGS_ADDR + PAC1934_NEG_PWR_REG_OFF, + regs[PAC1934_NEG_PWR_REG_OFF]); + if (ret) + return ret; + + ret = i2c_smbus_write_byte_data(client, PAC1934_SLOW_REG_ADDR, + regs[PAC1934_SLOW_REG_OFF]); if (ret) return ret; diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c index ebc583b07e0c0..e12eb7137b06a 100644 --- a/drivers/iio/adc/xilinx-ams.c +++ b/drivers/iio/adc/xilinx-ams.c @@ -118,7 +118,7 @@ #define AMS_ALARM_THRESHOLD_OFF_10 0x10 #define AMS_ALARM_THRESHOLD_OFF_20 0x20 -#define AMS_ALARM_THR_DIRECT_MASK BIT(1) +#define AMS_ALARM_THR_DIRECT_MASK BIT(0) #define AMS_ALARM_THR_MIN 0x0000 #define AMS_ALARM_THR_MAX (BIT(16) - 1) @@ -389,6 +389,29 @@ static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask) ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg); } +static void ams_unmask(struct ams *ams) +{ + unsigned int status, unmask; + + status = readl(ams->base + AMS_ISR_0); + + /* Clear those bits which are not active anymore */ + unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm; + + /* Clear status of disabled alarm */ + unmask |= ams->intr_mask; + + ams->current_masked_alarm &= status; + + /* Also clear those which are masked out anyway */ + ams->current_masked_alarm &= ~ams->intr_mask; + + /* Clear the interrupts before we unmask them */ + writel(unmask, ams->base + AMS_ISR_0); + + ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK); +} + static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask) { unsigned long flags; @@ -401,6 +424,7 @@ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask) spin_lock_irqsave(&ams->intr_lock, flags); ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask); + ams_unmask(ams); spin_unlock_irqrestore(&ams->intr_lock, flags); } @@ -1035,28 +1059,9 @@ static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events) static void ams_unmask_worker(struct work_struct *work) { struct ams *ams = container_of(work, struct ams, ams_unmask_work.work); - unsigned int status, unmask; spin_lock_irq(&ams->intr_lock); - - status = readl(ams->base + AMS_ISR_0); - - /* Clear those bits which are not active anymore */ - unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm; - - /* Clear status of disabled alarm */ - unmask |= ams->intr_mask; - - ams->current_masked_alarm &= status; - - /* Also clear those which are masked out anyway */ - ams->current_masked_alarm &= ~ams->intr_mask; - - /* Clear the interrupts before we unmask them */ - writel(unmask, ams->base + AMS_ISR_0); - - ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK); - + ams_unmask(ams); spin_unlock_irq(&ams->intr_lock); /* If still pending some alarm re-trigger the timer */ diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c index e0b7f658d6119..cf9cf90cd6e27 100644 --- a/drivers/iio/dac/ad5360.c +++ b/drivers/iio/dac/ad5360.c @@ -262,7 +262,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set, unsigned int clr) { struct ad5360_state *st = iio_priv(indio_dev); - unsigned int ret; + int ret; mutex_lock(&st->lock); diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c index 7644acfd879e0..9228e3cee1b85 100644 --- a/drivers/iio/dac/ad5421.c +++ b/drivers/iio/dac/ad5421.c @@ -186,7 +186,7 @@ static int 
ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
 	unsigned int clr)
 {
 	struct ad5421_state *st = iio_priv(indio_dev);
-	unsigned int ret;
+	int ret;

 	mutex_lock(&st->lock);

diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index e13e64a5164c1..f58d23c049ec3 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -149,6 +149,19 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
 	if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq)
 		return -EINVAL;

+	st->r4_rf_div_sel = 0;
+
+	/*
+	 * TODO: The computation below makes sure we get a power of 2
+	 * shift (st->r4_rf_div_sel) so that freq becomes higher than or equal
+	 * to ADF4350_MIN_VCO_FREQ. This might be simplified with fls()/fls_long()
+	 * and friends.
+	 */
+	while (freq < ADF4350_MIN_VCO_FREQ) {
+		freq <<= 1;
+		st->r4_rf_div_sel++;
+	}
+
 	if (freq > ADF4350_MAX_FREQ_45_PRESC) {
 		prescaler = ADF4350_REG1_PRESCALER;
 		mdiv = 75;
@@ -157,13 +170,6 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
 		mdiv = 23;
 	}

-	st->r4_rf_div_sel = 0;
-
-	while (freq < ADF4350_MIN_VCO_FREQ) {
-		freq <<= 1;
-		st->r4_rf_div_sel++;
-	}
-
 	/*
 	 * Allow a predefined reference division factor
 	 * if not set, compute our own
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 73aeddf53b767..9f88d8ca6d1b1 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -667,20 +667,12 @@ static void inv_icm42600_disable_vdd_reg(void *_data)
 static void inv_icm42600_disable_vddio_reg(void *_data)
 {
 	struct inv_icm42600_state *st = _data;
-	const struct device *dev = regmap_get_device(st->map);
-	int ret;
-
-	ret = regulator_disable(st->vddio_supply);
-	if (ret)
-		dev_err(dev, "failed to disable vddio error %d\n", ret);
-}
+	struct device *dev = regmap_get_device(st->map);

-static void inv_icm42600_disable_pm(void *_data)
-{
-	struct device *dev = _data;
+	if (pm_runtime_status_suspended(dev))
+		return;

-	pm_runtime_put_sync(dev);
-	pm_runtime_disable(dev);
+	regulator_disable(st->vddio_supply);
 }

 int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
@@ -777,16 +769,14 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
 		return ret;

 	/* setup runtime power management */
-	ret = pm_runtime_set_active(dev);
+	ret = devm_pm_runtime_set_active_enabled(dev);
 	if (ret)
 		return ret;
-	pm_runtime_get_noresume(dev);
-	pm_runtime_enable(dev);
+
 	pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS);
 	pm_runtime_use_autosuspend(dev);
-	pm_runtime_put(dev);

-	return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev);
+	return ret;
 }
 EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600);

@@ -797,17 +787,15 @@ EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600);
 static int inv_icm42600_suspend(struct device *dev)
 {
 	struct inv_icm42600_state *st = dev_get_drvdata(dev);
-	int ret;
+	int ret = 0;

 	mutex_lock(&st->lock);

 	st->suspended.gyro = st->conf.gyro.mode;
 	st->suspended.accel = st->conf.accel.mode;
 	st->suspended.temp = st->conf.temp_en;
-	if (pm_runtime_suspended(dev)) {
-		ret = 0;
+	if (pm_runtime_suspended(dev))
 		goto out_unlock;
-	}

 	/* disable FIFO data streaming */
 	if (st->fifo.on) {
@@ -839,18 +827,17 @@
 static int inv_icm42600_resume(struct device *dev)
 {
 	struct inv_icm42600_state *st = dev_get_drvdata(dev);
 	struct inv_icm42600_sensor_state *gyro_st =
iio_priv(st->indio_gyro);
 	struct inv_icm42600_sensor_state *accel_st = iio_priv(st->indio_accel);
-	int ret;
+	int ret = 0;

 	mutex_lock(&st->lock);

+	if (pm_runtime_suspended(dev))
+		goto out_unlock;
+
 	ret = inv_icm42600_enable_regulator_vddio(st);
 	if (ret)
 		goto out_unlock;

-	pm_runtime_disable(dev);
-	pm_runtime_set_active(dev);
-	pm_runtime_enable(dev);
-
 	/* restore sensors state */
 	ret = inv_icm42600_set_pwr_mgmt0(st, st->suspended.gyro,
 					 st->suspended.accel,
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 1155487f7aeac..85ba80c57d08f 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <linux/units.h>
 #include
 #include
@@ -602,7 +603,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
 {
 	int scale_type, scale_val, scale_val2;
 	int offset_type, offset_val, offset_val2;
-	s64 raw64 = raw;
+	s64 denominator, raw64 = raw;

 	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
 				       IIO_CHAN_INFO_OFFSET);
@@ -637,7 +638,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
 		 * If no channel scaling is available apply consumer scale to
 		 * raw value and return.
 		 */
-		*processed = raw * scale;
+		*processed = raw64 * scale;
 		return 0;
 	}

@@ -646,20 +647,19 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
 		*processed = raw64 * scale_val * scale;
 		break;
 	case IIO_VAL_INT_PLUS_MICRO:
-		if (scale_val2 < 0)
-			*processed = -raw64 * scale_val * scale;
-		else
-			*processed = raw64 * scale_val * scale;
-		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
-				      1000000LL);
-		break;
 	case IIO_VAL_INT_PLUS_NANO:
-		if (scale_val2 < 0)
-			*processed = -raw64 * scale_val * scale;
-		else
-			*processed = raw64 * scale_val * scale;
-		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
-				      1000000000LL);
+		switch (scale_type) {
+		case IIO_VAL_INT_PLUS_MICRO:
+			denominator = MICRO;
+			break;
+		case IIO_VAL_INT_PLUS_NANO:
+			denominator = NANO;
+			break;
+		}
+		*processed = raw64 * scale * abs(scale_val);
+		*processed += div_s64(raw64 * scale * abs(scale_val2), denominator);
+		if (scale_val < 0 || scale_val2 < 0)
+			*processed *= -1;
 		break;
 	case IIO_VAL_FRACTIONAL:
 		*processed = div_s64(raw64 * (s64)scale_val * scale,
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index be0743dac3fff..929e89841c12a 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -454,14 +454,10 @@ static int addr_resolve_neigh(const struct dst_entry *dst,
 {
 	int ret = 0;

-	if (ndev_flags & IFF_LOOPBACK) {
+	if (ndev_flags & IFF_LOOPBACK)
 		memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
-	} else {
-		if (!(ndev_flags & IFF_NOARP)) {
-			/* If the device doesn't do ARP internally */
-			ret = fetch_ha(dst, addr, dst_in, seq);
-		}
-	}
+	else
+		ret = fetch_ha(dst, addr, dst_in, seq);
 	return ret;
 }
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index d45e3909dafe1..50bb3c43f40bf 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1032,8 +1032,8 @@ static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
 	struct cm_id_private *cm_id_priv;

 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
-	       cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+	pr_err_ratelimited("%s: cm_id=%p timed out.
state %d -> %d, refcnt=%d\n", __func__, + cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount)); } static void cm_destroy_id(struct ib_cm_id *cm_id, int err) diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 53571e6b3162c..66df5bed6a562 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1013,6 +1013,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX) timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX; + spin_lock_irqsave(&ib_nl_request_lock, flags); + delta = timeout - sa_local_svc_timeout_ms; if (delta < 0) abs_delta = -delta; @@ -1020,7 +1022,6 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, abs_delta = delta; if (delta != 0) { - spin_lock_irqsave(&ib_nl_request_lock, flags); sa_local_svc_timeout_ms = timeout; list_for_each_entry(query, &ib_nl_request_list, list) { if (delta < 0 && abs_delta > query->timeout) @@ -1038,9 +1039,10 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, if (delay) mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, (unsigned long)delay); - spin_unlock_irqrestore(&ib_nl_request_lock, flags); } + spin_unlock_irqrestore(&ib_nl_request_lock, flags); + settimeout_out: return 0; } diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index f49f78b69ab9c..e74273bc078be 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -191,6 +191,7 @@ static u16 get_legacy_obj_type(u16 opcode) { switch (opcode) { case MLX5_CMD_OP_CREATE_RQ: + case MLX5_CMD_OP_CREATE_RMP: return MLX5_EVENT_QUEUE_TYPE_RQ; case MLX5_CMD_OP_CREATE_QP: return MLX5_EVENT_QUEUE_TYPE_QP; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 435c456a4fd5b..f3e58797705d7 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -865,6 +866,51 @@ static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev, resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask(); } +/* + * Calculate maximum SQ overhead across all QP types. + * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT) + * have smaller overhead than the types calculated below, + * so they are implicitly included. 
+ */ +static u32 mlx5_ib_calc_max_sq_overhead(void) +{ + u32 max_overhead_xrc, overhead_ud_lso, a, b; + + /* XRC_INI */ + max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg); + max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg); + a = sizeof(struct mlx5_wqe_atomic_seg) + + sizeof(struct mlx5_wqe_raddr_seg); + b = sizeof(struct mlx5_wqe_umr_ctrl_seg) + + sizeof(struct mlx5_mkey_seg) + + MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD; + max_overhead_xrc += max(a, b); + + /* UD with LSO */ + overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg); + overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad); + overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg); + overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg); + + return max(max_overhead_xrc, overhead_ud_lso); +} + +static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev) +{ + struct mlx5_core_dev *mdev = dev->mdev; + u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); + u32 max_wqe_size; + /* max QP overhead + 1 SGE, no inline, no special features */ + max_wqe_size = mlx5_ib_calc_max_sq_overhead() + + sizeof(struct mlx5_wqe_data_seg); + + max_wqe_size = roundup_pow_of_two(max_wqe_size); + + max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB); + + return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size; +} + static int mlx5_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) @@ -1023,7 +1069,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->max_mr_size = ~0ull; props->page_size_cap = ~(min_page_size - 1); props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp); - props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); + props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev); max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) / sizeof(struct mlx5_wqe_data_seg); max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512); @@ -1767,7 +1813,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev, } static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master, - struct mlx5_core_dev *slave) + struct mlx5_core_dev *slave, + struct mlx5_ib_lb_state *lb_state) { int err; @@ -1779,6 +1826,7 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master, if (err) goto out; + lb_state->force_enable = true; return 0; out: @@ -1787,16 +1835,22 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master, } static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master, - struct mlx5_core_dev *slave) + struct mlx5_core_dev *slave, + struct mlx5_ib_lb_state *lb_state) { mlx5_nic_vport_update_local_lb(slave, false); mlx5_nic_vport_update_local_lb(master, false); + + lb_state->force_enable = false; } int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) { int err = 0; + if (dev->lb.force_enable) + return 0; + mutex_lock(&dev->lb.mutex); if (td) dev->lb.user_td++; @@ -1818,6 +1872,9 @@ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) { + if (dev->lb.force_enable) + return; + mutex_lock(&dev->lb.mutex); if (td) dev->lb.user_td--; @@ -3475,7 +3532,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, lockdep_assert_held(&mlx5_ib_multiport_mutex); - mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev); + mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb); mlx5_core_mp_event_replay(ibdev->mdev, MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, @@ -3572,7 +3629,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, MLX5_DRIVER_EVENT_AFFILIATION_DONE, &key); - err = 
mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev); + err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb); if (err) goto unbind; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 29bde64ea1eac..f49cb588a856d 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1083,6 +1083,7 @@ struct mlx5_ib_lb_state { u32 user_td; int qps; bool enabled; + bool force_enable; }; struct mlx5_ib_pf_eq { diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 80332638d9e3a..be6cd8ce4d97e 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -132,8 +132,12 @@ static void do_task(struct rxe_task *task) * yield the cpu and reschedule the task */ if (!ret) { - task->state = TASK_STATE_IDLE; - resched = 1; + if (task->state != TASK_STATE_DRAINING) { + task->state = TASK_STATE_IDLE; + resched = 1; + } else { + cont = 1; + } goto exit; } diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 7ca0297d68a4a..d0c0cde09f118 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -773,7 +773,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, struct siw_wqe *wqe = tx_wqe(qp); unsigned long flags; - int rv = 0; + int rv = 0, imm_err = 0; if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) { siw_dbg_qp(qp, "wr must be empty for user mapped sq\n"); @@ -959,9 +959,17 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, * Send directly if SQ processing is not in progress. * Eventual immediate errors (rv < 0) do not affect the involved * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ - * processing, if new work is already pending. But rv must be passed - * to caller. + * processing, if new work is already pending. But rv and pointer + * to failed work request must be passed to caller. */ + if (unlikely(rv < 0)) { + /* + * Immediate error + */ + siw_dbg_qp(qp, "Immediate error %d\n", rv); + imm_err = rv; + *bad_wr = wr; + } if (wqe->wr_status != SIW_WR_IDLE) { spin_unlock_irqrestore(&qp->sq_lock, flags); goto skip_direct_sending; @@ -986,15 +994,10 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, up_read(&qp->state_lock); - if (rv >= 0) - return 0; - /* - * Immediate error - */ - siw_dbg_qp(qp, "error %d\n", rv); + if (unlikely(imm_err)) + return imm_err; - *bad_wr = wr; - return rv; + return (rv >= 0) ? 
0 : rv; } /* diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 2c51ea9d01d77..13336a2fd49c8 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -775,6 +775,7 @@ static int uinput_ff_upload_to_user(char __user *buffer, if (in_compat_syscall()) { struct uinput_ff_upload_compat ff_up_compat; + memset(&ff_up_compat, 0, sizeof(ff_up_compat)); ff_up_compat.request_id = ff_up->request_id; ff_up_compat.retval = ff_up->retval; /* diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 3ddabc5a2c999..d7496d47eabe8 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -3319,7 +3319,7 @@ static int mxt_probe(struct i2c_client *client) if (data->reset_gpio) { /* Wait a while and then de-assert the RESET GPIO line */ msleep(MXT_RESET_GPIO_TIME); - gpiod_set_value(data->reset_gpio, 0); + gpiod_set_value_cansleep(data->reset_gpio, 0); msleep(MXT_RESET_INVALID_CHG); } diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c index affbf4a1558de..5aa7f46a420b5 100644 --- a/drivers/iommu/intel/debugfs.c +++ b/drivers/iommu/intel/debugfs.c @@ -435,8 +435,21 @@ static int domain_translation_struct_show(struct seq_file *m, } pgd &= VTD_PAGE_MASK; } else { /* legacy mode */ - pgd = context->lo & VTD_PAGE_MASK; - agaw = context->hi & 7; + u8 tt = (u8)(context->lo & GENMASK_ULL(3, 2)) >> 2; + + /* + * According to Translation Type(TT), + * get the page table pointer(SSPTPTR). + */ + switch (tt) { + case CONTEXT_TT_MULTI_LEVEL: + case CONTEXT_TT_DEV_IOTLB: + pgd = context->lo & VTD_PAGE_MASK; + agaw = context->hi & 7; + break; + default: + goto iommu_unlock; + } } seq_printf(m, "Device %04x:%02x:%02x.%x ", diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 32d6710e264a4..c799cc67db34e 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3952,7 +3952,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) } if (info->ats_supported && ecap_prs(iommu->ecap) && - pci_pri_supported(pdev)) + ecap_pds(iommu->ecap) && pci_pri_supported(pdev)) info->pri_supported = 1; } } @@ -4328,13 +4328,14 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, break; } } - WARN_ON_ONCE(!dev_pasid); spin_unlock_irqrestore(&dmar_domain->lock, flags); cache_tag_unassign_domain(dmar_domain, dev, pasid); domain_detach_iommu(dmar_domain, iommu); - intel_iommu_debugfs_remove_dev_pasid(dev_pasid); - kfree(dev_pasid); + if (!WARN_ON_ONCE(!dev_pasid)) { + intel_iommu_debugfs_remove_dev_pasid(dev_pasid); + kfree(dev_pasid); + } intel_pasid_tear_down_entry(iommu, dev, pasid, false); intel_drain_pasid_prq(dev, pasid); } diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index f521155fb793b..df24a62e8ca40 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -541,7 +541,8 @@ enum { #define pasid_supported(iommu) (sm_supported(iommu) && \ ecap_pasid((iommu)->ecap)) #define ssads_supported(iommu) (sm_supported(iommu) && \ - ecap_slads((iommu)->ecap)) + ecap_slads((iommu)->ecap) && \ + ecap_smpwc(iommu->ecap)) #define nested_supported(iommu) (sm_supported(iommu) && \ ecap_nest((iommu)->ecap)) diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c index 1b0812f8bf840..af39b2379d534 100644 --- a/drivers/iommu/iommufd/fault.c +++ b/drivers/iommu/iommufd/fault.c @@ -415,7 +415,7 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd) 
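The fault.c hunks below funnel a failed fd reservation into the common abort label: the object's file reference is no longer dropped with an explicit fput() here, because iommufd_object_abort_and_destroy() (reworked in the main.c hunk further down) now performs the __fput_sync() itself. A minimal sketch of the resulting pattern follows; obj_alloc(), finalize(), struct demo_ctx/demo_obj and obj_abort_and_destroy() are hypothetical stand-ins for the driver's helpers, not iommufd's real API:

/*
 * Sketch only: reserve the fd number first, install the file last,
 * and keep a single teardown path for every failure.
 */
static int demo_alloc_with_fd(struct demo_ctx *ctx)
{
	struct demo_obj *obj;
	int fdno, rc;

	obj = obj_alloc(ctx);			/* file created, fd not yet visible */
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	fdno = get_unused_fd_flags(O_CLOEXEC);	/* reserve an fd number only */
	if (fdno < 0) {
		rc = fdno;
		goto out_abort;
	}

	rc = finalize(ctx, obj);		/* last point that can fail */
	if (rc)
		goto out_put_fdno;

	fd_install(fdno, obj->filep);		/* publish only after success */
	return 0;

out_put_fdno:
	put_unused_fd(fdno);
out_abort:
	/* single teardown path; also drops obj->filep via __fput_sync() */
	obj_abort_and_destroy(ctx, obj);
	return rc;
}

Publishing the fd only after the last failure point means no other task can reach the half-built object through the fd table, which is what lets the abort path assume it holds the sole file reference.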
fdno = get_unused_fd_flags(O_CLOEXEC); if (fdno < 0) { rc = fdno; - goto out_fput; + goto out_abort; } cmd->out_fault_id = fault->obj.id; @@ -431,8 +431,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd) return 0; out_put_fdno: put_unused_fd(fdno); -out_fput: - fput(filep); out_abort: iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj); diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index 649fe79d0f0cc..6add737a67084 100644 --- a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -23,6 +23,7 @@ #include "iommufd_test.h" struct iommufd_object_ops { + size_t file_offset; void (*destroy)(struct iommufd_object *obj); void (*abort)(struct iommufd_object *obj); }; @@ -97,10 +98,30 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj) void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx, struct iommufd_object *obj) { - if (iommufd_object_ops[obj->type].abort) - iommufd_object_ops[obj->type].abort(obj); + const struct iommufd_object_ops *ops = &iommufd_object_ops[obj->type]; + + if (ops->file_offset) { + struct file **filep = ((void *)obj) + ops->file_offset; + + /* + * A file should hold a users refcount while the file is open + * and put it back in its release. The file should hold a + * pointer to obj in their private data. Normal fput() is + * deferred to a workqueue and can get out of order with the + * following kfree(obj). Using the sync version ensures the + * release happens immediately. During abort we require the file + * refcount is one at this point - meaning the object alloc + * function cannot do anything to allow another thread to take a + * refcount prior to a guaranteed success. + */ + if (*filep) + __fput_sync(*filep); + } + + if (ops->abort) + ops->abort(obj); else - iommufd_object_ops[obj->type].destroy(obj); + ops->destroy(obj); iommufd_object_abort(ictx, obj); } @@ -498,6 +519,12 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx) } EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, IOMMUFD); +#define IOMMUFD_FILE_OFFSET(_struct, _filep, _obj) \ + .file_offset = (offsetof(_struct, _filep) + \ + BUILD_BUG_ON_ZERO(!__same_type( \ + struct file *, ((_struct *)NULL)->_filep)) + \ + BUILD_BUG_ON_ZERO(offsetof(_struct, _obj))) + static const struct iommufd_object_ops iommufd_object_ops[] = { [IOMMUFD_OBJ_ACCESS] = { .destroy = iommufd_access_destroy_object, @@ -518,6 +545,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = { }, [IOMMUFD_OBJ_FAULT] = { .destroy = iommufd_fault_destroy, + IOMMUFD_FILE_OFFSET(struct iommufd_fault, filep, obj), }, #ifdef CONFIG_IOMMUFD_TEST [IOMMUFD_OBJ_SELFTEST] = { diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 36dbcf2d728a5..9c4af7d588463 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -252,11 +252,11 @@ static int plic_irq_suspend(void) priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; - for (i = 0; i < priv->nr_irqs; i++) - if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID)) - __set_bit(i, priv->prio_save); - else - __clear_bit(i, priv->prio_save); + /* irq ID 0 is reserved */ + for (i = 1; i < priv->nr_irqs; i++) { + __assign_bit(i, priv->prio_save, + readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID)); + } for_each_cpu(cpu, cpu_present_mask) { struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); @@ -284,7 +284,8 @@ static void plic_irq_resume(void) priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; - for (i = 0; i < 
priv->nr_irqs; i++) { + /* irq ID 0 is reserved */ + for (i = 1; i < priv->nr_irqs; i++) { index = BIT_WORD(i); writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0, priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID); diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c index 07a83bb2dfdf6..bb00097b1ae59 100644 --- a/drivers/leds/flash/leds-qcom-flash.c +++ b/drivers/leds/flash/leds-qcom-flash.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -114,36 +114,39 @@ enum { REG_THERM_THRSH1, REG_THERM_THRSH2, REG_THERM_THRSH3, + REG_TORCH_CLAMP, REG_MAX_COUNT, }; static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = { - REG_FIELD(0x08, 0, 7), /* status1 */ - REG_FIELD(0x09, 0, 7), /* status2 */ - REG_FIELD(0x0a, 0, 7), /* status3 */ - REG_FIELD_ID(0x40, 0, 7, 3, 1), /* chan_timer */ - REG_FIELD_ID(0x43, 0, 6, 3, 1), /* itarget */ - REG_FIELD(0x46, 7, 7), /* module_en */ - REG_FIELD(0x47, 0, 5), /* iresolution */ - REG_FIELD_ID(0x49, 0, 2, 3, 1), /* chan_strobe */ - REG_FIELD(0x4c, 0, 2), /* chan_en */ - REG_FIELD(0x56, 0, 2), /* therm_thrsh1 */ - REG_FIELD(0x57, 0, 2), /* therm_thrsh2 */ - REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */ + [REG_STATUS1] = REG_FIELD(0x08, 0, 7), + [REG_STATUS2] = REG_FIELD(0x09, 0, 7), + [REG_STATUS3] = REG_FIELD(0x0a, 0, 7), + [REG_CHAN_TIMER] = REG_FIELD_ID(0x40, 0, 7, 3, 1), + [REG_ITARGET] = REG_FIELD_ID(0x43, 0, 6, 3, 1), + [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7), + [REG_IRESOLUTION] = REG_FIELD(0x47, 0, 5), + [REG_CHAN_STROBE] = REG_FIELD_ID(0x49, 0, 2, 3, 1), + [REG_CHAN_EN] = REG_FIELD(0x4c, 0, 2), + [REG_THERM_THRSH1] = REG_FIELD(0x56, 0, 2), + [REG_THERM_THRSH2] = REG_FIELD(0x57, 0, 2), + [REG_THERM_THRSH3] = REG_FIELD(0x58, 0, 2), + [REG_TORCH_CLAMP] = REG_FIELD(0xec, 0, 6), }; static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = { - REG_FIELD(0x06, 0, 7), /* status1 */ - REG_FIELD(0x07, 0, 6), /* status2 */ - REG_FIELD(0x09, 0, 7), /* status3 */ - REG_FIELD_ID(0x3e, 0, 7, 4, 1), /* chan_timer */ - REG_FIELD_ID(0x42, 0, 6, 4, 1), /* itarget */ - REG_FIELD(0x46, 7, 7), /* module_en */ - REG_FIELD(0x49, 0, 3), /* iresolution */ - REG_FIELD_ID(0x4a, 0, 6, 4, 1), /* chan_strobe */ - REG_FIELD(0x4e, 0, 3), /* chan_en */ - REG_FIELD(0x7a, 0, 2), /* therm_thrsh1 */ - REG_FIELD(0x78, 0, 2), /* therm_thrsh2 */ + [REG_STATUS1] = REG_FIELD(0x06, 0, 7), + [REG_STATUS2] = REG_FIELD(0x07, 0, 6), + [REG_STATUS3] = REG_FIELD(0x09, 0, 7), + [REG_CHAN_TIMER] = REG_FIELD_ID(0x3e, 0, 7, 4, 1), + [REG_ITARGET] = REG_FIELD_ID(0x42, 0, 6, 4, 1), + [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7), + [REG_IRESOLUTION] = REG_FIELD(0x49, 0, 3), + [REG_CHAN_STROBE] = REG_FIELD_ID(0x4a, 0, 6, 4, 1), + [REG_CHAN_EN] = REG_FIELD(0x4e, 0, 3), + [REG_THERM_THRSH1] = REG_FIELD(0x7a, 0, 2), + [REG_THERM_THRSH2] = REG_FIELD(0x78, 0, 2), + [REG_TORCH_CLAMP] = REG_FIELD(0xed, 0, 6), }; struct qcom_flash_data { @@ -156,6 +159,7 @@ struct qcom_flash_data { u8 max_channels; u8 chan_en_bits; u8 revision; + u8 torch_clamp; }; struct qcom_flash_led { @@ -702,6 +706,7 @@ static int qcom_flash_register_led_device(struct device *dev, u32 current_ua, timeout_us; u32 channels[4]; int i, rc, count; + u8 torch_clamp; count = fwnode_property_count_u32(node, "led-sources"); if (count <= 0) { @@ -751,6 +756,12 @@ static int qcom_flash_register_led_device(struct device 
*dev, current_ua = min_t(u32, current_ua, TORCH_CURRENT_MAX_UA * led->chan_count); led->max_torch_current_ma = current_ua / UA_PER_MA; + torch_clamp = (current_ua / led->chan_count) / TORCH_IRES_UA; + if (torch_clamp != 0) + torch_clamp--; + + flash_data->torch_clamp = max_t(u8, flash_data->torch_clamp, torch_clamp); + if (fwnode_property_present(node, "flash-max-microamp")) { flash->led_cdev.flags |= LED_DEV_CAP_FLASH; @@ -918,8 +929,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev) flash_data->leds_count++; } - return 0; - + return regmap_field_write(flash_data->r_fields[REG_TORCH_CLAMP], flash_data->torch_clamp); release: fwnode_handle_put(child); while (flash_data->v4l2_flash[flash_data->leds_count] && flash_data->leds_count) diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index e71456a56ab8d..fd447eb7eb15e 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -212,7 +212,7 @@ int lp55xx_update_program_memory(struct lp55xx_chip *chip, * For LED chip that support page, PAGE is already set in load_engine. */ if (!cfg->pages_per_engine) - start_addr += LP55xx_BYTES_PER_PAGE * idx; + start_addr += LP55xx_BYTES_PER_PAGE * (idx - 1); for (page = 0; page < program_length / LP55xx_BYTES_PER_PAGE; page++) { /* Write to the next page each 32 bytes (if supported) */ diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index d24f71819c3d6..38ab35157c85f 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -379,20 +379,13 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); struct cmdq_task *task; unsigned long curr_pa, end_pa; - int ret; /* Client should not flush new tasks if suspended. 
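Note that the runtime-PM get/put pair is dropped from this path below; the send callback may be reached from atomic context, so it must not sleep.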
*/ WARN_ON(cmdq->suspended); - ret = pm_runtime_get_sync(cmdq->mbox.dev); - if (ret < 0) - return ret; - task = kzalloc(sizeof(*task), GFP_ATOMIC); - if (!task) { - pm_runtime_put_autosuspend(cmdq->mbox.dev); + if (!task) return -ENOMEM; - } task->cmdq = cmdq; INIT_LIST_HEAD(&task->list_entry); @@ -439,9 +432,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) } list_move_tail(&task->list_entry, &thread->task_busy_list); - pm_runtime_mark_last_busy(cmdq->mbox.dev); - pm_runtime_put_autosuspend(cmdq->mbox.dev); - return 0; } diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index d59fcb74b3479..5bf81b60e9e67 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -62,7 +62,8 @@ #define DST_BIT_POS 9U #define SRC_BITMASK GENMASK(11, 8) -#define MAX_SGI 16 +/* Macro to represent SGI type for IPI IRQs */ +#define IPI_IRQ_TYPE_SGI 2 /* * Module parameters @@ -121,6 +122,7 @@ struct zynqmp_ipi_mbox { * @dev: device pointer corresponding to the Xilinx ZynqMP * IPI agent * @irq: IPI agent interrupt ID + * @irq_type: IPI SGI or SPI IRQ type * @method: IPI SMC or HVC is going to be used * @local_id: local IPI agent ID * @virq_sgi: IRQ number mapped to SGI @@ -130,6 +132,7 @@ struct zynqmp_ipi_mbox { struct zynqmp_ipi_pdata { struct device *dev; int irq; + unsigned int irq_type; unsigned int method; u32 local_id; int virq_sgi; @@ -887,17 +890,14 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata) struct zynqmp_ipi_mbox *ipi_mbox; int i; - if (pdata->irq < MAX_SGI) + if (pdata->irq_type == IPI_IRQ_TYPE_SGI) xlnx_mbox_cleanup_sgi(pdata); - i = pdata->num_mboxes; + i = pdata->num_mboxes - 1; for (; i >= 0; i--) { ipi_mbox = &pdata->ipi_mboxes[i]; - if (ipi_mbox->dev.parent) { - mbox_controller_unregister(&ipi_mbox->mbox); - if (device_is_registered(&ipi_mbox->dev)) - device_unregister(&ipi_mbox->dev); - } + if (device_is_registered(&ipi_mbox->dev)) + device_unregister(&ipi_mbox->dev); } } @@ -959,14 +959,16 @@ static int zynqmp_ipi_probe(struct platform_device *pdev) dev_err(dev, "failed to parse interrupts\n"); goto free_mbox_dev; } - ret = out_irq.args[1]; + + /* Use interrupt type to distinguish SGI and SPI interrupts */ + pdata->irq_type = out_irq.args[0]; /* * If Interrupt number is in SGI range, then request SGI else request * IPI system IRQ. 
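 * The SGI case is detected from the type cell of the parsed
 * interrupt specifier (out_irq.args[0] == IPI_IRQ_TYPE_SGI) rather
 * than by assuming that any hwirq number below 16 is an SGI.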
*/ - if (ret < MAX_SGI) { - pdata->irq = ret; + if (pdata->irq_type == IPI_IRQ_TYPE_SGI) { + pdata->irq = out_irq.args[1]; ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata); if (ret) goto free_mbox_dev; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index f3a3f2ef63226..391c1df19a764 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -162,6 +162,7 @@ struct mapped_device { #define DMF_SUSPENDED_INTERNALLY 7 #define DMF_POST_SUSPENDING 8 #define DMF_EMULATE_ZONE_APPEND 9 +#define DMF_QUEUE_STOPPED 10 void disable_discard(struct mapped_device *md); void disable_write_zeroes(struct mapped_device *md); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 450e1a7e7bac7..444cf35feebf4 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -133,7 +133,7 @@ struct journal_sector { commit_id_t commit_id; }; -#define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK])) +#define MAX_TAG_SIZE 255 #define METADATA_PADDING_SECTORS 8 diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c index 12f954a0c5325..afb062e1f1fb4 100644 --- a/drivers/md/dm-vdo/indexer/volume-index.c +++ b/drivers/md/dm-vdo/indexer/volume-index.c @@ -836,7 +836,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index, "%zu bytes decoded of %zu expected", offset, sizeof(buffer)); if (result != VDO_SUCCESS) - result = UDS_CORRUPT_DATA; + return UDS_CORRUPT_DATA; if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) { return vdo_log_warning_strerror(UDS_CORRUPT_DATA, @@ -928,7 +928,7 @@ static int start_restoring_volume_index(struct volume_index *volume_index, "%zu bytes decoded of %zu expected", offset, sizeof(buffer)); if (result != VDO_SUCCESS) - result = UDS_CORRUPT_DATA; + return UDS_CORRUPT_DATA; if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0) return vdo_log_warning_strerror(UDS_CORRUPT_DATA, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a7deeda59a55a..fd84a126f63fb 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2918,7 +2918,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, { bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; - int r; + int r = 0; lockdep_assert_held(&md->suspend_lock); @@ -2970,8 +2970,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, * Stop md->queue before flushing md->wq in case request-based * dm defers requests to md->wq from md->queue. */ - if (dm_request_based(md)) + if (map && dm_request_based(md)) { dm_stop_queue(md->queue); + set_bit(DMF_QUEUE_STOPPED, &md->flags); + } flush_workqueue(md->wq); @@ -2980,7 +2982,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, * We call dm_wait_for_completion to wait for all existing requests * to finish. */ - r = dm_wait_for_completion(md, task_state); + if (map) + r = dm_wait_for_completion(md, task_state); if (!r) set_bit(dmf_suspended_flag, &md->flags); @@ -2993,7 +2996,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, if (r < 0) { dm_queue_flush(md); - if (dm_request_based(md)) + if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags)) dm_start_queue(md->queue); unlock_fs(md); @@ -3077,7 +3080,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map) * so that mapping of targets can work correctly. 
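 * The DMF_QUEUE_STOPPED flag pairs this restart with the
 * dm_stop_queue() issued during suspend, so the queue is only
 * restarted if it was actually stopped.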
* Request-based dm is queueing the deferred I/Os in its request_queue. */ - if (dm_request_based(md)) + if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags)) dm_start_queue(md->queue); unlock_fs(md); diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index 369aed044b409..d733ebee624b7 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -267,6 +267,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio) } bio_chain(split, bio); + trace_block_split(split, bio->bi_iter.bi_sector); submit_bio_noacct(bio); bio = split; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 31bea72bcb01a..db1ab214250f9 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -464,7 +464,15 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) struct bio *split = bio_split(bio, zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO, &mddev->bio_set); + + if (IS_ERR(split)) { + bio->bi_status = errno_to_blk_status(PTR_ERR(split)); + bio_endio(bio); + return; + } + bio_chain(split, bio); + trace_block_split(split, bio->bi_iter.bi_sector); submit_bio_noacct(bio); bio = split; end = zone->zone_end; @@ -606,7 +614,15 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) if (sectors < bio_sectors(bio)) { struct bio *split = bio_split(bio, sectors, GFP_NOIO, &mddev->bio_set); + + if (IS_ERR(split)) { + bio->bi_status = errno_to_blk_status(PTR_ERR(split)); + bio_endio(bio); + return true; + } + bio_chain(split, bio); + trace_block_split(split, bio->bi_iter.bi_sector); raid0_map_submit_bio(mddev, bio); bio = split; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index faccf7344ef93..4c6b1bd6da9bb 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1317,7 +1317,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, struct raid1_info *mirror; struct bio *read_bio; int max_sectors; - int rdisk; + int rdisk, error; bool r1bio_existed = !!r1_bio; /* @@ -1378,7 +1378,14 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, if (max_sectors < bio_sectors(bio)) { struct bio *split = bio_split(bio, max_sectors, gfp, &conf->bio_split); + + if (IS_ERR(split)) { + error = PTR_ERR(split); + goto err_handle; + } + bio_chain(split, bio); + trace_block_split(split, bio->bi_iter.bi_sector); submit_bio_noacct(bio); bio = split; r1_bio->master_bio = bio; @@ -1404,6 +1411,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, read_bio->bi_private = r1_bio; mddev_trace_remap(mddev, read_bio, r1_bio->sector); submit_bio_noacct(read_bio); + return; + +err_handle: + atomic_dec(&mirror->rdev->nr_pending); + bio->bi_status = errno_to_blk_status(error); + set_bit(R1BIO_Uptodate, &r1_bio->state); + raid_end_bio_io(r1_bio); } static void raid1_write_request(struct mddev *mddev, struct bio *bio, @@ -1411,7 +1425,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, { struct r1conf *conf = mddev->private; struct r1bio *r1_bio; - int i, disks; + int i, disks, k, error; unsigned long flags; struct md_rdev *blocked_rdev; int first_clone; @@ -1557,7 +1571,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, if (max_sectors < bio_sectors(bio)) { struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, &conf->bio_split); + + if (IS_ERR(split)) { + error = PTR_ERR(split); + goto err_handle; + } + bio_chain(split, bio); + trace_block_split(split, bio->bi_iter.bi_sector); submit_bio_noacct(bio); bio = split; r1_bio->master_bio = bio; @@ -1640,6 +1661,18 @@ 
static void raid1_write_request(struct mddev *mddev, struct bio *bio, /* In case raid1d snuck in to freeze_array */ wake_up_barrier(conf); + return; +err_handle: + for (k = 0; k < i; k++) { + if (r1_bio->bios[k]) { + rdev_dec_pending(conf->mirrors[k].rdev, mddev); + r1_bio->bios[k] = NULL; + } + } + + bio->bi_status = errno_to_blk_status(error); + set_bit(R1BIO_Uptodate, &r1_bio->state); + raid_end_bio_io(r1_bio); } static bool raid1_make_request(struct mddev *mddev, struct bio *bio) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 6579bbb6a39a5..b0062ad9b1d95 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1153,6 +1153,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, int slot = r10_bio->read_slot; struct md_rdev *err_rdev = NULL; gfp_t gfp = GFP_NOIO; + int error; if (slot >= 0 && r10_bio->devs[slot].rdev) { /* @@ -1203,7 +1204,13 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, if (max_sectors < bio_sectors(bio)) { struct bio *split = bio_split(bio, max_sectors, gfp, &conf->bio_split); + if (IS_ERR(split)) { + error = PTR_ERR(split); + goto err_handle; + } + bio_chain(split, bio); + trace_block_split(split, bio->bi_iter.bi_sector); allow_barrier(conf); submit_bio_noacct(bio); wait_barrier(conf, false); @@ -1233,6 +1240,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, mddev_trace_remap(mddev, read_bio, r10_bio->sector); submit_bio_noacct(read_bio); return; +err_handle: + atomic_dec(&rdev->nr_pending); + bio->bi_status = errno_to_blk_status(error); + set_bit(R10BIO_Uptodate, &r10_bio->state); + raid_end_bio_io(r10_bio); } static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, @@ -1341,9 +1353,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, struct r10bio *r10_bio) { struct r10conf *conf = mddev->private; - int i; + int i, k; sector_t sectors; int max_sectors; + int error; if ((mddev_is_clustered(mddev) && md_cluster_ops->area_resyncing(mddev, WRITE, @@ -1469,7 +1482,13 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, if (r10_bio->sectors < bio_sectors(bio)) { struct bio *split = bio_split(bio, r10_bio->sectors, GFP_NOIO, &conf->bio_split); + if (IS_ERR(split)) { + error = PTR_ERR(split); + goto err_handle; + } + bio_chain(split, bio); + trace_block_split(split, bio->bi_iter.bi_sector); allow_barrier(conf); submit_bio_noacct(bio); wait_barrier(conf, false); @@ -1488,6 +1507,26 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, raid10_write_one_disk(mddev, r10_bio, bio, true, i); } one_write_done(r10_bio); + return; +err_handle: + for (k = 0; k < i; k++) { + int d = r10_bio->devs[k].devnum; + struct md_rdev *rdev = conf->mirrors[d].rdev; + struct md_rdev *rrdev = conf->mirrors[d].replacement; + + if (r10_bio->devs[k].bio) { + rdev_dec_pending(rdev, mddev); + r10_bio->devs[k].bio = NULL; + } + if (r10_bio->devs[k].repl_bio) { + rdev_dec_pending(rrdev, mddev); + r10_bio->devs[k].repl_bio = NULL; + } + } + + bio->bi_status = errno_to_blk_status(error); + set_bit(R10BIO_Uptodate, &r10_bio->state); + raid_end_bio_io(r10_bio); } static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) @@ -1629,7 +1668,14 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio) if (remainder) { split_size = stripe_size - remainder; split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split); + if (IS_ERR(split)) { + bio->bi_status = errno_to_blk_status(PTR_ERR(split)); + 
bio_endio(bio); + return 0; + } + bio_chain(split, bio); + trace_block_split(split, bio->bi_iter.bi_sector); allow_barrier(conf); /* Resend the fist split part */ submit_bio_noacct(split); @@ -1639,7 +1685,14 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio) if (remainder) { split_size = bio_sectors(bio) - remainder; split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split); + if (IS_ERR(split)) { + bio->bi_status = errno_to_blk_status(PTR_ERR(split)); + bio_endio(bio); + return 0; + } + bio_chain(split, bio); + trace_block_split(split, bio->bi_iter.bi_sector); allow_barrier(conf); /* Resend the second split part */ submit_bio_noacct(bio); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 39e7596e78c0b..4fae8ade24090 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5484,8 +5484,10 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) if (sectors < bio_sectors(raid_bio)) { struct r5conf *conf = mddev->private; + split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); bio_chain(split, raid_bio); + trace_block_split(split, raid_bio->bi_iter.bi_sector); submit_bio_noacct(raid_bio); raid_bio = split; } diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile index 2e8f7f60263f1..08d58524419f7 100644 --- a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile +++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile @@ -1,8 +1,2 @@ extron-da-hd-4k-plus-cec-objs := extron-da-hd-4k-plus.o cec-splitter.o obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) := extron-da-hd-4k-plus-cec.o - -all: - $(MAKE) -C $(KDIR) M=$(shell pwd) modules - -install: - $(MAKE) -C $(KDIR) M=$(shell pwd) modules_install diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c index 723fe138e7bcc..8bf06a763a251 100644 --- a/drivers/media/i2c/mt9v111.c +++ b/drivers/media/i2c/mt9v111.c @@ -532,8 +532,8 @@ static int mt9v111_calc_frame_rate(struct mt9v111_dev *mt9v111, static int mt9v111_hw_config(struct mt9v111_dev *mt9v111) { struct i2c_client *c = mt9v111->client; - unsigned int ret; u16 outfmtctrl2; + int ret; /* Force device reset. 
*/ ret = __mt9v111_hw_reset(mt9v111); diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c index b7ca39f63dba8..6dfc912168510 100644 --- a/drivers/media/i2c/rj54n1cb0c.c +++ b/drivers/media/i2c/rj54n1cb0c.c @@ -1329,10 +1329,13 @@ static int rj54n1_probe(struct i2c_client *client) V4L2_CID_GAIN, 0, 127, 1, 66); v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1); - rj54n1->subdev.ctrl_handler = &rj54n1->hdl; - if (rj54n1->hdl.error) - return rj54n1->hdl.error; + if (rj54n1->hdl.error) { + ret = rj54n1->hdl.error; + goto err_free_ctrl; + } + + rj54n1->subdev.ctrl_handler = &rj54n1->hdl; rj54n1->clk_div = clk_div; rj54n1->rect.left = RJ54N1_COLUMN_SKIP; rj54n1->rect.top = RJ54N1_ROW_SKIP; diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index d1306f39fa135..6a023fe22635a 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -2189,10 +2189,10 @@ static int tc358743_probe(struct i2c_client *client) err_work_queues: cec_unregister_adapter(state->cec_adap); if (!state->i2c_client->irq) { - del_timer(&state->timer); + timer_delete_sync(&state->timer); flush_work(&state->work_i2c_poll); } - cancel_delayed_work(&state->delayed_work_enable_hotplug); + cancel_delayed_work_sync(&state->delayed_work_enable_hotplug); mutex_destroy(&state->confctl_mutex); err_hdl: media_entity_cleanup(&sd->entity); diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c index 56444edaf1365..6daa7aa994422 100644 --- a/drivers/media/mc/mc-devnode.c +++ b/drivers/media/mc/mc-devnode.c @@ -50,11 +50,6 @@ static void media_devnode_release(struct device *cd) { struct media_devnode *devnode = to_media_devnode(cd); - mutex_lock(&media_devnode_lock); - /* Mark device node number as free */ - clear_bit(devnode->minor, media_devnode_nums); - mutex_unlock(&media_devnode_lock); - /* Release media_devnode and perform other cleanups as needed. */ if (devnode->release) devnode->release(devnode); @@ -281,6 +276,7 @@ void media_devnode_unregister(struct media_devnode *devnode) /* Delete the cdev on this minor as well */ cdev_device_del(&devnode->cdev, &devnode->dev); devnode->media_dev = NULL; + clear_bit(devnode->minor, media_devnode_nums); mutex_unlock(&media_devnode_lock); put_device(&devnode->dev); diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c index 96dd0f6ccd0d0..b3761839a5218 100644 --- a/drivers/media/mc/mc-entity.c +++ b/drivers/media/mc/mc-entity.c @@ -691,7 +691,7 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, * (already discovered through iterating over links) and pads * not internally connected. 
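 * (Pads that already have links are reached through the link
 * iteration above, so only link-less pads joined through internal
 * pad interdependencies need to be explored here.)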
*/ - if (origin == local || !local->num_links || + if (origin == local || local->num_links || !media_entity_has_pad_interdep(origin->entity, origin->index, local->index)) continue; diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c index 486c8ec0fa60d..ab53c5b02c48d 100644 --- a/drivers/media/pci/b2c2/flexcop-pci.c +++ b/drivers/media/pci/b2c2/flexcop-pci.c @@ -411,7 +411,7 @@ static void flexcop_pci_remove(struct pci_dev *pdev) struct flexcop_pci *fc_pci = pci_get_drvdata(pdev); if (irq_chk_intv > 0) - cancel_delayed_work(&fc_pci->irq_check_work); + cancel_delayed_work_sync(&fc_pci->irq_check_work); flexcop_pci_dma_exit(fc_pci); flexcop_device_exit(fc_pci->fc_dev); diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c index 013694bfcb1c1..7cbb2d5869320 100644 --- a/drivers/media/pci/cx18/cx18-queue.c +++ b/drivers/media/pci/cx18/cx18-queue.c @@ -379,15 +379,22 @@ int cx18_stream_alloc(struct cx18_stream *s) break; } + buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev, + buf->buf, s->buf_size, + s->dma); + if (dma_mapping_error(&s->cx->pci_dev->dev, buf->dma_handle)) { + kfree(buf->buf); + kfree(mdl); + kfree(buf); + break; + } + INIT_LIST_HEAD(&mdl->list); INIT_LIST_HEAD(&mdl->buf_list); mdl->id = s->mdl_base_idx; /* a somewhat safe value */ cx18_enqueue(s, mdl, &s->q_idle); INIT_LIST_HEAD(&buf->list); - buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev, - buf->buf, s->buf_size, - s->dma); cx18_buf_sync_for_cpu(s, buf); list_add_tail(&buf->list, &s->buf_pool); } diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c index b7aaa8b4a7841..e39bf64c5c715 100644 --- a/drivers/media/pci/ivtv/ivtv-irq.c +++ b/drivers/media/pci/ivtv/ivtv-irq.c @@ -351,7 +351,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock) /* Insert buffer block for YUV if needed */ if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) { - if (yi->blanking_dmaptr) { + if (yi->blanking_ptr) { s->sg_pending[idx].src = yi->blanking_dmaptr; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = 720 * 16; diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c index 2d9274537725a..71f0401066471 100644 --- a/drivers/media/pci/ivtv/ivtv-yuv.c +++ b/drivers/media/pci/ivtv/ivtv-yuv.c @@ -125,7 +125,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma, ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size); /* If we've offset the y plane, ensure top area is blanked */ - if (f->offset_y && yi->blanking_dmaptr) { + if (f->offset_y && yi->blanking_ptr) { dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16); dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr); dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]); @@ -929,6 +929,12 @@ static void ivtv_yuv_init(struct ivtv *itv) yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev, yi->blanking_ptr, 720 * 16, DMA_TO_DEVICE); + if (dma_mapping_error(&itv->pdev->dev, yi->blanking_dmaptr)) { + kfree(yi->blanking_ptr); + yi->blanking_ptr = NULL; + yi->blanking_dmaptr = 0; + IVTV_DEBUG_WARN("Failed to dma_map yuv blanking buffer\n"); + } } else { yi->blanking_dmaptr = 0; IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n"); diff --git a/drivers/media/pci/mgb4/mgb4_trigger.c b/drivers/media/pci/mgb4/mgb4_trigger.c index 923650d53d4c8..d7dddc5c8728e 100644 --- a/drivers/media/pci/mgb4/mgb4_trigger.c +++ 
b/drivers/media/pci/mgb4/mgb4_trigger.c @@ -91,7 +91,7 @@ static irqreturn_t trigger_handler(int irq, void *p) struct { u32 data; s64 ts __aligned(8); - } scan; + } scan = { }; scan.data = mgb4_read_reg(&st->mgbdev->video, 0xA0); mgb4_write_reg(&st->mgbdev->video, 0xA0, scan.data); diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h index 1cd990468d3de..d05e222b39215 100644 --- a/drivers/media/pci/zoran/zoran.h +++ b/drivers/media/pci/zoran/zoran.h @@ -154,12 +154,6 @@ struct zoran_jpg_settings { struct zoran; -/* zoran_fh contains per-open() settings */ -struct zoran_fh { - struct v4l2_fh fh; - struct zoran *zr; -}; - struct card_info { enum card_type type; char name[32]; diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c index 5c05e64c71a90..80377992a6073 100644 --- a/drivers/media/pci/zoran/zoran_driver.c +++ b/drivers/media/pci/zoran/zoran_driver.c @@ -511,12 +511,11 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran *zr = video_drvdata(file); - struct zoran_fh *fh = __fh; int i; int res = 0; if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) - return zoran_s_fmt_vid_out(file, fh, fmt); + return zoran_s_fmt_vid_out(file, __fh, fmt); for (i = 0; i < NUM_FORMATS; i++) if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc) diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h index 2810ebe9b5f75..5a4676d520793 100644 --- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h +++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h @@ -361,7 +361,7 @@ void mxc_isi_channel_get(struct mxc_isi_pipe *pipe); void mxc_isi_channel_put(struct mxc_isi_pipe *pipe); void mxc_isi_channel_enable(struct mxc_isi_pipe *pipe); void mxc_isi_channel_disable(struct mxc_isi_pipe *pipe); -int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass); +int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe); void mxc_isi_channel_unchain(struct mxc_isi_pipe *pipe); void mxc_isi_channel_config(struct mxc_isi_pipe *pipe, diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c index 5623914f95e64..9225a7ac1c3ee 100644 --- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c +++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c @@ -587,7 +587,7 @@ void mxc_isi_channel_release(struct mxc_isi_pipe *pipe) * * TODO: Support secondary line buffer for downscaling YUV420 images. */ -int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass) +int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe) { /* Channel chaining requires both line and output buffer. 
*/ const u8 resources = MXC_ISI_CHANNEL_RES_OUTPUT_BUF diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c index cd6c52e9d158a..81223d28ee56e 100644 --- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c +++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c @@ -43,7 +43,6 @@ struct mxc_isi_m2m_ctx_queue_data { struct v4l2_pix_format_mplane format; const struct mxc_isi_format_info *info; u32 sequence; - bool streaming; }; struct mxc_isi_m2m_ctx { @@ -236,6 +235,65 @@ static void mxc_isi_m2m_vb2_buffer_queue(struct vb2_buffer *vb2) v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } +static int mxc_isi_m2m_vb2_prepare_streaming(struct vb2_queue *q) +{ + struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q); + const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format; + const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format; + const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info; + const struct mxc_isi_format_info *out_info = ctx->queues.out.info; + struct mxc_isi_m2m *m2m = ctx->m2m; + int ret; + + guard(mutex)(&m2m->lock); + + if (m2m->usage_count == INT_MAX) + return -EOVERFLOW; + + /* + * Acquire the pipe and initialize the channel with the first user of + * the M2M device. + */ + if (m2m->usage_count == 0) { + bool bypass = cap_pix->width == out_pix->width && + cap_pix->height == out_pix->height && + cap_info->encoding == out_info->encoding; + + ret = mxc_isi_channel_acquire(m2m->pipe, + &mxc_isi_m2m_frame_write_done, + bypass); + if (ret) + return ret; + + mxc_isi_channel_get(m2m->pipe); + } + + m2m->usage_count++; + + /* + * Allocate resources for the channel, counting how many users require + * buffer chaining. + */ + if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) { + ret = mxc_isi_channel_chain(m2m->pipe); + if (ret) + goto err_deinit; + + m2m->chained_count++; + ctx->chained = true; + } + + return 0; + +err_deinit: + if (--m2m->usage_count == 0) { + mxc_isi_channel_put(m2m->pipe); + mxc_isi_channel_release(m2m->pipe); + } + + return ret; +} + static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q, unsigned int count) { @@ -265,6 +323,35 @@ static void mxc_isi_m2m_vb2_stop_streaming(struct vb2_queue *q) } } +static void mxc_isi_m2m_vb2_unprepare_streaming(struct vb2_queue *q) +{ + struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q); + struct mxc_isi_m2m *m2m = ctx->m2m; + + guard(mutex)(&m2m->lock); + + /* + * If the last context is this one, reset it to make sure the device + * will be reconfigured when streaming is restarted. + */ + if (m2m->last_ctx == ctx) + m2m->last_ctx = NULL; + + /* Free the channel resources if this is the last chained context. */ + if (ctx->chained && --m2m->chained_count == 0) + mxc_isi_channel_unchain(m2m->pipe); + ctx->chained = false; + + /* Turn off the light with the last user. 
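Disable the channel before dropping the last usage reference so the hardware is quiesced before the pipe is released.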
*/ + if (--m2m->usage_count == 0) { + mxc_isi_channel_disable(m2m->pipe); + mxc_isi_channel_put(m2m->pipe); + mxc_isi_channel_release(m2m->pipe); + } + + WARN_ON(m2m->usage_count < 0); +} + static const struct vb2_ops mxc_isi_m2m_vb2_qops = { .queue_setup = mxc_isi_m2m_vb2_queue_setup, .buf_init = mxc_isi_m2m_vb2_buffer_init, @@ -272,8 +359,10 @@ static const struct vb2_ops mxc_isi_m2m_vb2_qops = { .buf_queue = mxc_isi_m2m_vb2_buffer_queue, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, + .prepare_streaming = mxc_isi_m2m_vb2_prepare_streaming, .start_streaming = mxc_isi_m2m_vb2_start_streaming, .stop_streaming = mxc_isi_m2m_vb2_stop_streaming, + .unprepare_streaming = mxc_isi_m2m_vb2_unprepare_streaming, }; static int mxc_isi_m2m_queue_init(void *priv, struct vb2_queue *src_vq, @@ -483,136 +572,6 @@ static int mxc_isi_m2m_s_fmt_vid(struct file *file, void *fh, return 0; } -static int mxc_isi_m2m_streamon(struct file *file, void *fh, - enum v4l2_buf_type type) -{ - struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh); - struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type); - const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format; - const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format; - const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info; - const struct mxc_isi_format_info *out_info = ctx->queues.out.info; - struct mxc_isi_m2m *m2m = ctx->m2m; - bool bypass; - int ret; - - if (q->streaming) - return 0; - - mutex_lock(&m2m->lock); - - if (m2m->usage_count == INT_MAX) { - ret = -EOVERFLOW; - goto unlock; - } - - bypass = cap_pix->width == out_pix->width && - cap_pix->height == out_pix->height && - cap_info->encoding == out_info->encoding; - - /* - * Acquire the pipe and initialize the channel with the first user of - * the M2M device. - */ - if (m2m->usage_count == 0) { - ret = mxc_isi_channel_acquire(m2m->pipe, - &mxc_isi_m2m_frame_write_done, - bypass); - if (ret) - goto unlock; - - mxc_isi_channel_get(m2m->pipe); - } - - m2m->usage_count++; - - /* - * Allocate resources for the channel, counting how many users require - * buffer chaining. - */ - if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) { - ret = mxc_isi_channel_chain(m2m->pipe, bypass); - if (ret) - goto deinit; - - m2m->chained_count++; - ctx->chained = true; - } - - /* - * Drop the lock to start the stream, as the .device_run() operation - * needs to acquire it. - */ - mutex_unlock(&m2m->lock); - ret = v4l2_m2m_ioctl_streamon(file, fh, type); - if (ret) { - /* Reacquire the lock for the cleanup path. */ - mutex_lock(&m2m->lock); - goto unchain; - } - - q->streaming = true; - - return 0; - -unchain: - if (ctx->chained && --m2m->chained_count == 0) - mxc_isi_channel_unchain(m2m->pipe); - ctx->chained = false; - -deinit: - if (--m2m->usage_count == 0) { - mxc_isi_channel_put(m2m->pipe); - mxc_isi_channel_release(m2m->pipe); - } - -unlock: - mutex_unlock(&m2m->lock); - return ret; -} - -static int mxc_isi_m2m_streamoff(struct file *file, void *fh, - enum v4l2_buf_type type) -{ - struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh); - struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type); - struct mxc_isi_m2m *m2m = ctx->m2m; - - v4l2_m2m_ioctl_streamoff(file, fh, type); - - if (!q->streaming) - return 0; - - mutex_lock(&m2m->lock); - - /* - * If the last context is this one, reset it to make sure the device - * will be reconfigured when streaming is restarted. 
- */ - if (m2m->last_ctx == ctx) - m2m->last_ctx = NULL; - - /* Free the channel resources if this is the last chained context. */ - if (ctx->chained && --m2m->chained_count == 0) - mxc_isi_channel_unchain(m2m->pipe); - ctx->chained = false; - - /* Turn off the light with the last user. */ - if (--m2m->usage_count == 0) { - mxc_isi_channel_disable(m2m->pipe); - mxc_isi_channel_put(m2m->pipe); - mxc_isi_channel_release(m2m->pipe); - } - - WARN_ON(m2m->usage_count < 0); - - mutex_unlock(&m2m->lock); - - q->streaming = false; - - return 0; -} - static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = { .vidioc_querycap = mxc_isi_m2m_querycap, @@ -633,8 +592,8 @@ static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = { .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, - .vidioc_streamon = mxc_isi_m2m_streamon, - .vidioc_streamoff = mxc_isi_m2m_streamoff, + .vidioc_streamon = v4l2_m2m_ioctl_streamon, + .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c index d76eb58deb096..a41c51dd9ce0f 100644 --- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c +++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c @@ -855,7 +855,7 @@ int mxc_isi_pipe_acquire(struct mxc_isi_pipe *pipe, /* Chain the channel if needed for wide resolutions. */ if (sink_fmt->width > MXC_ISI_MAX_WIDTH_UNCHAINED) { - ret = mxc_isi_channel_chain(pipe, bypass); + ret = mxc_isi_channel_chain(pipe); if (ret) mxc_isi_channel_release(pipe); } diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c index 66a18830e66da..4e2636b053669 100644 --- a/drivers/media/platform/qcom/venus/firmware.c +++ b/drivers/media/platform/qcom/venus/firmware.c @@ -30,7 +30,7 @@ static void venus_reset_cpu(struct venus_core *core) u32 fw_size = core->fw.mapped_mem_size; void __iomem *wrapper_base; - if (IS_IRIS2_1(core)) + if (IS_IRIS2(core) || IS_IRIS2_1(core)) wrapper_base = core->wrapper_tz_base; else wrapper_base = core->wrapper_base; @@ -42,7 +42,7 @@ static void venus_reset_cpu(struct venus_core *core) writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR); writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR); - if (IS_IRIS2_1(core)) { + if (IS_IRIS2(core) || IS_IRIS2_1(core)) { /* Bring XTSS out of reset */ writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET); } else { @@ -68,7 +68,7 @@ int venus_set_hw_state(struct venus_core *core, bool resume) if (resume) { venus_reset_cpu(core); } else { - if (IS_IRIS2_1(core)) + if (IS_IRIS2(core) || IS_IRIS2_1(core)) writel(WRAPPER_XTSS_SW_RESET_BIT, core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET); else @@ -181,7 +181,7 @@ static int venus_shutdown_no_tz(struct venus_core *core) void __iomem *wrapper_base = core->wrapper_base; void __iomem *wrapper_tz_base = core->wrapper_tz_base; - if (IS_IRIS2_1(core)) { + if (IS_IRIS2(core) || IS_IRIS2_1(core)) { /* Assert the reset to XTSS */ reg = readl(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET); reg |= WRAPPER_XTSS_SW_RESET_BIT; diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c index 47bc3014b5d8b..f7c682fca6459 100644 --- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c +++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c @@ -14,8 +14,7 @@ #include 
"s5p_mfc_opr.h" #include "s5p_mfc_cmd_v6.h" -static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd, - const struct s5p_mfc_cmd_args *args) +static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd) { mfc_debug(2, "Issue the command: %d\n", cmd); @@ -31,7 +30,6 @@ static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd, static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev) { - struct s5p_mfc_cmd_args h2r_args; const struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv; int ret; @@ -41,33 +39,23 @@ static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev) mfc_write(dev, dev->ctx_buf.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6); mfc_write(dev, buf_size->dev_ctx, S5P_FIMV_CONTEXT_MEM_SIZE_V6); - return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6, - &h2r_args); + return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6); } static int s5p_mfc_sleep_cmd_v6(struct s5p_mfc_dev *dev) { - struct s5p_mfc_cmd_args h2r_args; - - memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args)); - return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6, - &h2r_args); + return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6); } static int s5p_mfc_wakeup_cmd_v6(struct s5p_mfc_dev *dev) { - struct s5p_mfc_cmd_args h2r_args; - - memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args)); - return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6, - &h2r_args); + return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6); } /* Open a new instance and get its number */ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - struct s5p_mfc_cmd_args h2r_args; int codec_type; mfc_debug(2, "Requested codec mode: %d\n", ctx->codec_mode); @@ -129,23 +117,20 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx) mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6); mfc_write(dev, 0, S5P_FIMV_D_CRC_CTRL_V6); /* no crc */ - return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6, - &h2r_args); + return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6); } /* Close instance */ static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - struct s5p_mfc_cmd_args h2r_args; int ret = 0; dev->curr_ctx = ctx->num; if (ctx->state != MFCINST_FREE) { mfc_write(dev, ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6); ret = s5p_mfc_cmd_host2risc_v6(dev, - S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6, - &h2r_args); + S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6); } else { ret = -EINVAL; } @@ -153,9 +138,15 @@ static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx) return ret; } +static int s5p_mfc_cmd_host2risc_v6_args(struct s5p_mfc_dev *dev, int cmd, + const struct s5p_mfc_cmd_args *ignored) +{ + return s5p_mfc_cmd_host2risc_v6(dev, cmd); +} + /* Initialize cmd function pointers for MFC v6 */ static const struct s5p_mfc_hw_cmds s5p_mfc_cmds_v6 = { - .cmd_host2risc = s5p_mfc_cmd_host2risc_v6, + .cmd_host2risc = s5p_mfc_cmd_host2risc_v6_args, .sys_init_cmd = s5p_mfc_sys_init_cmd_v6, .sleep_cmd = s5p_mfc_sleep_cmd_v6, .wakeup_cmd = s5p_mfc_wakeup_cmd_v6, diff --git a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c index 0533d4a083d24..a078f1107300e 100644 --- a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c +++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c @@ -239,7 +239,7 @@ static int delta_mjpeg_ipc_open(struct delta_ctx *pctx) return 0; } -static int 
delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au) +static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, dma_addr_t pstart, dma_addr_t pend) { struct delta_dev *delta = pctx->dev; struct delta_mjpeg_ctx *ctx = to_ctx(pctx); @@ -256,8 +256,8 @@ static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au) memset(params, 0, sizeof(*params)); - params->picture_start_addr_p = (u32)(au->paddr); - params->picture_end_addr_p = (u32)(au->paddr + au->size - 1); + params->picture_start_addr_p = pstart; + params->picture_end_addr_p = pend; /* * !WARNING! @@ -374,12 +374,14 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau) struct delta_dev *delta = pctx->dev; struct delta_mjpeg_ctx *ctx = to_ctx(pctx); int ret; - struct delta_au au = *pau; + void *au_vaddr = pau->vaddr; + dma_addr_t au_dma = pau->paddr; + size_t au_size = pau->size; unsigned int data_offset = 0; struct mjpeg_header *header = &ctx->header_struct; if (!ctx->header) { - ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size, + ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size, header, &data_offset); if (ret) { pctx->stream_errors++; @@ -405,17 +407,17 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau) goto err; } - ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size, + ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size, ctx->header, &data_offset); if (ret) { pctx->stream_errors++; goto err; } - au.paddr += data_offset; - au.vaddr += data_offset; + au_dma += data_offset; + au_vaddr += data_offset; - ret = delta_mjpeg_ipc_decode(pctx, &au); + ret = delta_mjpeg_ipc_decode(pctx, au_dma, au_dma + au_size - 1); if (ret) goto err; diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c index 3853245fcf6e5..f8cb93ce9459f 100644 --- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c +++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c @@ -52,6 +52,8 @@ #define DRAIN_TIMEOUT_MS 50 #define DRAIN_BUFFER_SIZE SZ_32K +#define CSI2RX_BRIDGE_SOURCE_PAD 1 + struct ti_csi2rx_fmt { u32 fourcc; /* Four character code. */ u32 code; /* Mbus code. 
*/ @@ -426,8 +428,9 @@ static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier) if (ret) return ret; - ret = v4l2_create_fwnode_links_to_pad(csi->source, &csi->pad, - MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + ret = media_create_pad_link(&csi->source->entity, CSI2RX_BRIDGE_SOURCE_PAD, + &vdev->entity, csi->pad.index, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) { video_unregister_device(vdev); @@ -1121,7 +1124,7 @@ static int ti_csi2rx_probe(struct platform_device *pdev) if (ret) goto err_vb2q; - ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev); + ret = devm_of_platform_populate(csi->dev); if (ret) { dev_err(csi->dev, "Failed to create children: %d\n", ret); goto err_subdev; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 8f1361bcce3a6..1e7f800701331 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -536,7 +536,9 @@ static int display_open(struct inode *inode, struct file *file) mutex_lock(&ictx->lock); - if (!ictx->display_supported) { + if (ictx->disconnected) { + retval = -ENODEV; + } else if (!ictx->display_supported) { pr_err("display not supported by device\n"); retval = -ENODEV; } else if (ictx->display_isopen) { @@ -598,6 +600,9 @@ static int send_packet(struct imon_context *ictx) int retval = 0; struct usb_ctrlrequest *control_req = NULL; + if (ictx->disconnected) + return -ENODEV; + /* Check if we need to use control or interrupt urb */ if (!ictx->tx_control) { pipe = usb_sndintpipe(ictx->usbdev_intf0, @@ -949,12 +954,14 @@ static ssize_t vfd_write(struct file *file, const char __user *buf, static const unsigned char vfd_packet6[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF }; - if (ictx->disconnected) - return -ENODEV; - if (mutex_lock_interruptible(&ictx->lock)) return -ERESTARTSYS; + if (ictx->disconnected) { + retval = -ENODEV; + goto exit; + } + if (!ictx->dev_present_intf0) { pr_err_ratelimited("no iMON device present\n"); retval = -ENODEV; @@ -1029,11 +1036,13 @@ static ssize_t lcd_write(struct file *file, const char __user *buf, int retval = 0; struct imon_context *ictx = file->private_data; - if (ictx->disconnected) - return -ENODEV; - mutex_lock(&ictx->lock); + if (ictx->disconnected) { + retval = -ENODEV; + goto exit; + } + if (!ictx->display_supported) { pr_err_ratelimited("no iMON display present\n"); retval = -ENODEV; @@ -2499,7 +2508,11 @@ static void imon_disconnect(struct usb_interface *interface) int ifnum; ictx = usb_get_intfdata(interface); + + mutex_lock(&ictx->lock); ictx->disconnected = true; + mutex_unlock(&ictx->lock); + dev = ictx->dev; ifnum = interface->cur_altsetting->desc.bInterfaceNumber; diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index f042f3f14afa9..314f64420f629 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -736,11 +736,11 @@ int lirc_register(struct rc_dev *dev) cdev_init(&dev->lirc_cdev, &lirc_fops); + get_device(&dev->dev); + err = cdev_device_add(&dev->lirc_cdev, &dev->lirc_dev); if (err) - goto out_ida; - - get_device(&dev->dev); + goto out_put_device; switch (dev->driver_type) { case RC_DRIVER_SCANCODE: @@ -764,7 +764,8 @@ int lirc_register(struct rc_dev *dev) return 0; -out_ida: +out_put_device: + put_device(&dev->lirc_dev); ida_free(&lirc_ida, minor); return err; } diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c index 356a988dd6a13..2d15fdd5d999e 100644 --- a/drivers/media/test-drivers/vivid/vivid-cec.c +++ 
b/drivers/media/test-drivers/vivid/vivid-cec.c @@ -327,7 +327,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg) char osd[14]; if (!cec_is_sink(adap)) - return -ENOMSG; + break; cec_ops_set_osd_string(msg, &disp_ctl, osd); switch (disp_ctl) { case CEC_OP_DISP_CTL_DEFAULT: @@ -348,7 +348,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg) cec_transmit_msg(adap, &reply, false); break; } - break; + return 0; } case CEC_MSG_VENDOR_COMMAND_WITH_ID: { u32 vendor_id; @@ -379,7 +379,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg) if (size == 1) { // Ignore even op values if (!(vendor_cmd[0] & 1)) - break; + return 0; reply.len = msg->len; memcpy(reply.msg + 1, msg->msg + 1, msg->len - 1); reply.msg[msg->len - 1]++; @@ -388,12 +388,10 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg) CEC_OP_ABORT_INVALID_OP); } cec_transmit_msg(adap, &reply, false); - break; + return 0; } - default: - return -ENOMSG; } - return 0; + return -ENOMSG; } static const struct cec_adap_ops vivid_cec_adap_ops = { diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c index 30aa4ee958bde..ec9a3cd4784e1 100644 --- a/drivers/media/tuners/xc5000.c +++ b/drivers/media/tuners/xc5000.c @@ -1304,7 +1304,7 @@ static void xc5000_release(struct dvb_frontend *fe) mutex_lock(&xc5000_list_mutex); if (priv) { - cancel_delayed_work(&priv->timer_sleep); + cancel_delayed_work_sync(&priv->timer_sleep); hybrid_tuner_release_state(priv); } diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index fde5cc70bf79c..9c2dd64be6d38 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -135,6 +135,9 @@ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id) { struct uvc_entity *entity; + if (id == UVC_INVALID_ENTITY_ID) + return NULL; + list_for_each_entry(entity, &dev->entities, list) { if (entity->id == id) return entity; @@ -778,14 +781,27 @@ static const u8 uvc_media_transport_input_guid[16] = UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT; static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING; -static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id, - unsigned int num_pads, unsigned int extra_size) +static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type, + u16 id, unsigned int num_pads, + unsigned int extra_size) { struct uvc_entity *entity; unsigned int num_inputs; unsigned int size; unsigned int i; + /* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */ + if (id == 0) { + dev_err(&dev->intf->dev, "Found Unit with invalid ID 0\n"); + id = UVC_INVALID_ENTITY_ID; + } + + /* Per UVC 1.1+ spec 3.7.2, the ID is unique. */ + if (uvc_entity_by_id(dev, id)) { + dev_err(&dev->intf->dev, "Found multiple Units with ID %u\n", id); + id = UVC_INVALID_ENTITY_ID; + } + extra_size = roundup(extra_size, sizeof(*entity->pads)); if (num_pads) num_inputs = type & UVC_TERM_OUTPUT ? 
num_pads : num_pads - 1; @@ -795,7 +811,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id, + num_inputs; entity = kzalloc(size, GFP_KERNEL); if (entity == NULL) - return NULL; + return ERR_PTR(-ENOMEM); entity->id = id; entity->type = type; @@ -907,10 +923,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev, break; } - unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3], - p + 1, 2*n); - if (unit == NULL) - return -ENOMEM; + unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT, + buffer[3], p + 1, 2 * n); + if (IS_ERR(unit)) + return PTR_ERR(unit); memcpy(unit->guid, &buffer[4], 16); unit->extension.bNumControls = buffer[20]; @@ -1019,10 +1035,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev, return -EINVAL; } - term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3], - 1, n + p); - if (term == NULL) - return -ENOMEM; + term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT, + buffer[3], 1, n + p); + if (IS_ERR(term)) + return PTR_ERR(term); if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) { term->camera.bControlSize = n; @@ -1078,10 +1094,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev, return 0; } - term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3], - 1, 0); - if (term == NULL) - return -ENOMEM; + term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT, + buffer[3], 1, 0); + if (IS_ERR(term)) + return PTR_ERR(term); memcpy(term->baSourceID, &buffer[7], 1); @@ -1100,9 +1116,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev, return -EINVAL; } - unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0); - if (unit == NULL) - return -ENOMEM; + unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], + p + 1, 0); + if (IS_ERR(unit)) + return PTR_ERR(unit); memcpy(unit->baSourceID, &buffer[5], p); @@ -1122,9 +1139,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev, return -EINVAL; } - unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n); - if (unit == NULL) - return -ENOMEM; + unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n); + if (IS_ERR(unit)) + return PTR_ERR(unit); memcpy(unit->baSourceID, &buffer[4], 1); unit->processing.wMaxMultiplier = @@ -1151,9 +1168,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev, return -EINVAL; } - unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n); - if (unit == NULL) - return -ENOMEM; + unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], + p + 1, n); + if (IS_ERR(unit)) + return PTR_ERR(unit); memcpy(unit->guid, &buffer[4], 16); unit->extension.bNumControls = buffer[20]; @@ -1293,9 +1311,10 @@ static int uvc_gpio_parse(struct uvc_device *dev) return dev_err_probe(&dev->intf->dev, irq, "No IRQ for privacy GPIO\n"); - unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1); - if (!unit) - return -ENOMEM; + unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT, + UVC_EXT_GPIO_UNIT_ID, 0, 1); + if (IS_ERR(unit)) + return PTR_ERR(unit); unit->gpio.gpio_privacy = gpio_privacy; unit->gpio.irq = irq; diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index 74ac2106f08e2..62d6129c88ece 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -41,6 +41,8 @@ #define UVC_EXT_GPIO_UNIT 0x7ffe #define UVC_EXT_GPIO_UNIT_ID 0x100 +#define UVC_INVALID_ENTITY_ID 0xffff + /* ------------------------------------------------------------------------ * Driver specific constants. 
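The uvc_alloc_new_entity() conversion above moves from NULL returns to ERR_PTR() encoding, so an allocation failure propagates as a real error code instead of being guessed by every caller. A minimal sketch of the pattern, assuming the usual linux/err.h semantics:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct thing {
            int id;
    };

    static struct thing *thing_alloc(int id)
    {
            struct thing *t;

            t = kzalloc(sizeof(*t), GFP_KERNEL);
            if (!t)
                    return ERR_PTR(-ENOMEM);  /* was: return NULL */

            t->id = id;
            return t;
    }

    /* Callers propagate the encoded error instead of assuming -ENOMEM:
     *     t = thing_alloc(id);
     *     if (IS_ERR(t))
     *             return PTR_ERR(t);
     */
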
*/ diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c index e73dd330af477..d913fb901973f 100644 --- a/drivers/memory/samsung/exynos-srom.c +++ b/drivers/memory/samsung/exynos-srom.c @@ -121,20 +121,18 @@ static int exynos_srom_probe(struct platform_device *pdev) return -ENOMEM; srom->dev = dev; - srom->reg_base = of_iomap(np, 0); - if (!srom->reg_base) { + srom->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(srom->reg_base)) { dev_err(&pdev->dev, "iomap of exynos srom controller failed\n"); - return -ENOMEM; + return PTR_ERR(srom->reg_base); } platform_set_drvdata(pdev, srom); srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets, ARRAY_SIZE(exynos_srom_offsets)); - if (!srom->reg_offset) { - iounmap(srom->reg_base); + if (!srom->reg_offset) return -ENOMEM; - } for_each_child_of_node(np, child) { if (exynos_srom_configure_bank(srom, child)) { diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c index 992855bfda3e4..6daf33e07ea0a 100644 --- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c +++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c @@ -81,8 +81,9 @@ static struct mfd_cell chtdc_ti_dev[] = { static const struct regmap_config chtdc_ti_regmap_config = { .reg_bits = 8, .val_bits = 8, - .max_register = 128, - .cache_type = REGCACHE_NONE, + .max_register = 0xff, + /* The hardware does not support reading multiple registers at once */ + .use_single_read = true, }; static const struct regmap_irq chtdc_ti_irqs[] = { diff --git a/drivers/mfd/rz-mtu3.c b/drivers/mfd/rz-mtu3.c index f3dac4a29a832..9cdfef610398f 100644 --- a/drivers/mfd/rz-mtu3.c +++ b/drivers/mfd/rz-mtu3.c @@ -32,7 +32,7 @@ static const unsigned long rz_mtu3_8bit_ch_reg_offs[][13] = { [RZ_MTU3_CHAN_2] = MTU_8BIT_CH_1_2(0x204, 0x092, 0x205, 0x200, 0x20c, 0x201, 0x202), [RZ_MTU3_CHAN_3] = MTU_8BIT_CH_3_4_6_7(0x008, 0x093, 0x02c, 0x000, 0x04c, 0x002, 0x004, 0x005, 0x038), [RZ_MTU3_CHAN_4] = MTU_8BIT_CH_3_4_6_7(0x009, 0x094, 0x02d, 0x001, 0x04d, 0x003, 0x006, 0x007, 0x039), - [RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x1eb, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6), + [RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x895, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6), [RZ_MTU3_CHAN_6] = MTU_8BIT_CH_3_4_6_7(0x808, 0x893, 0x82c, 0x800, 0x84c, 0x802, 0x804, 0x805, 0x838), [RZ_MTU3_CHAN_7] = MTU_8BIT_CH_3_4_6_7(0x809, 0x894, 0x82d, 0x801, 0x84d, 0x803, 0x806, 0x807, 0x839), [RZ_MTU3_CHAN_8] = MTU_8BIT_CH_8(0x404, 0x098, 0x400, 0x406, 0x401, 0x402, 0x403) diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c index d34d58ce46db2..5f26ef733fd0d 100644 --- a/drivers/mfd/vexpress-sysreg.c +++ b/drivers/mfd/vexpress-sysreg.c @@ -90,6 +90,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) struct resource *mem; void __iomem *base; struct gpio_chip *mmc_gpio_chip; + int ret; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) @@ -110,7 +111,10 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI, NULL, NULL, NULL, NULL, 0); mmc_gpio_chip->ngpio = 2; - devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL); + + ret = devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL); + if (ret) + return ret; return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, vexpress_sysreg_cells, diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index e567a36275afc..d6c55c338b062 100644 
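In the fastrpc diff that follows, fastrpc_map_lookup() matches on the dma_buf pointer as well as the fd, since a file descriptor number can be recycled for a different buffer between calls. A sketch of that identity check, with a hypothetical map structure:

    #include <linux/dma-buf.h>
    #include <linux/err.h>
    #include <linux/list.h>

    /* Hypothetical map entry: identity is (fd, dma_buf), not fd alone. */
    struct map_entry {
            int fd;
            struct dma_buf *buf;
            struct list_head node;
    };

    static bool map_matches(const struct map_entry *m, int fd)
    {
            struct dma_buf *buf = dma_buf_get(fd);  /* takes a temporary reference */
            bool match;

            if (IS_ERR(buf))
                    return false;

            match = (m->fd == fd && m->buf == buf);  /* fd may have been recycled */
            dma_buf_put(buf);                        /* drop the temporary reference */
            return match;
    }
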
--- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -323,11 +323,11 @@ static void fastrpc_free_map(struct kref *ref) perm.vmid = QCOM_SCM_VMID_HLOS; perm.perm = QCOM_SCM_PERM_RWX; - err = qcom_scm_assign_mem(map->phys, map->size, + err = qcom_scm_assign_mem(map->phys, map->len, &src_perms, &perm, 1); if (err) { dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", - map->phys, map->size, err); + map->phys, map->len, err); return; } } @@ -363,32 +363,29 @@ static int fastrpc_map_get(struct fastrpc_map *map) static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd, - struct fastrpc_map **ppmap, bool take_ref) + struct fastrpc_map **ppmap) { - struct fastrpc_session_ctx *sess = fl->sctx; struct fastrpc_map *map = NULL; + struct dma_buf *buf; int ret = -ENOENT; + buf = dma_buf_get(fd); + if (IS_ERR(buf)) + return PTR_ERR(buf); + spin_lock(&fl->lock); list_for_each_entry(map, &fl->maps, node) { - if (map->fd != fd) + if (map->fd != fd || map->buf != buf) continue; - if (take_ref) { - ret = fastrpc_map_get(map); - if (ret) { - dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n", - __func__, fd, ret); - break; - } - } - *ppmap = map; ret = 0; break; } spin_unlock(&fl->lock); + dma_buf_put(buf); + return ret; } @@ -752,16 +749,14 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = { .release = fastrpc_release, }; -static int fastrpc_map_create(struct fastrpc_user *fl, int fd, +static int fastrpc_map_attach(struct fastrpc_user *fl, int fd, u64 len, u32 attr, struct fastrpc_map **ppmap) { struct fastrpc_session_ctx *sess = fl->sctx; struct fastrpc_map *map = NULL; struct sg_table *table; - int err = 0; - - if (!fastrpc_map_lookup(fl, fd, ppmap, true)) - return 0; + struct scatterlist *sgl = NULL; + int err = 0, sgl_index = 0; map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) @@ -798,7 +793,15 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, map->phys = sg_dma_address(map->table->sgl); map->phys += ((u64)fl->sctx->sid << 32); } - map->size = len; + for_each_sg(map->table->sgl, sgl, map->table->nents, + sgl_index) + map->size += sg_dma_len(sgl); + if (len > map->size) { + dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n", + len, map->size); + err = -EINVAL; + goto map_err; + } map->va = sg_virt(map->table->sgl); map->len = len; @@ -815,10 +818,10 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; dst_perms[1].perm = QCOM_SCM_PERM_RWX; map->attr = attr; - err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2); + err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2); if (err) { dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", - map->phys, map->size, err); + map->phys, map->len, err); goto map_err; } } @@ -839,6 +842,24 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, return err; } +static int fastrpc_map_create(struct fastrpc_user *fl, int fd, + u64 len, u32 attr, struct fastrpc_map **ppmap) +{ + struct fastrpc_session_ctx *sess = fl->sctx; + int err = 0; + + if (!fastrpc_map_lookup(fl, fd, ppmap)) { + if (!fastrpc_map_get(*ppmap)) + return 0; + dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n", + __func__, fd); + } + + err = fastrpc_map_attach(fl, fd, len, attr, ppmap); + + return err; +} + /* * Fastrpc payload buffer with metadata looks like: * @@ -911,8 +932,12 @@ static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx) ctx->args[i].length == 
0) continue; - err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, - ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); + if (i < ctx->nbufs) + err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, + ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); + else + err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd, + ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); if (err) { dev_err(dev, "Error Creating map %d\n", err); return -EINVAL; @@ -1071,6 +1096,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, struct fastrpc_phy_page *pages; u64 *fdlist; int i, inbufs, outbufs, handles; + int ret = 0; inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc); @@ -1086,23 +1112,26 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, u64 len = rpra[i].buf.len; if (!kernel) { - if (copy_to_user((void __user *)dst, src, len)) - return -EFAULT; + if (copy_to_user((void __user *)dst, src, len)) { + ret = -EFAULT; + goto cleanup_fdlist; + } } else { memcpy(dst, src, len); } } } +cleanup_fdlist: /* Clean up fdlist which is updated by DSP */ for (i = 0; i < FASTRPC_MAX_FDLIST; i++) { if (!fdlist[i]) break; - if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false)) + if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap)) fastrpc_map_put(mmap); } - return 0; + return ret; } static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, @@ -2044,7 +2073,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) args[0].length = sizeof(req_msg); pages.addr = map->phys; - pages.size = map->size; + pages.size = map->len; args[1].ptr = (u64) (uintptr_t) &pages; args[1].length = sizeof(pages); @@ -2059,7 +2088,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]); if (err) { dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n", - req.fd, req.vaddrin, map->size); + req.fd, req.vaddrin, map->len); goto err_invoke; } @@ -2072,7 +2101,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) if (copy_to_user((void __user *)argp, &req, sizeof(req))) { /* unmap the memory and release the buffer */ req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr; - req_unmap.length = map->size; + req_unmap.length = map->len; fastrpc_req_mem_unmap_impl(fl, &req_unmap); return -EFAULT; } diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c index 500b1feaf1f6f..fd7d5cd50d396 100644 --- a/drivers/misc/genwqe/card_ddcb.c +++ b/drivers/misc/genwqe/card_ddcb.c @@ -923,7 +923,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, } if (cmd->asv_length > DDCB_ASV_LENGTH) { dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n", - __func__, cmd->asiv_length); + __func__, cmd->asv_length); return -EINVAL; } rc = __genwqe_enqueue_ddcb(cd, req, f_flags); diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c index 0159276656780..00ed2147113e6 100644 --- a/drivers/misc/lkdtm/fortify.c +++ b/drivers/misc/lkdtm/fortify.c @@ -44,6 +44,9 @@ static void lkdtm_FORTIFY_STR_MEMBER(void) char *src; src = kmalloc(size, GFP_KERNEL); + if (!src) + return; + strscpy(src, "over ten bytes", size); size = strlen(src) + 1; @@ -109,6 +112,9 @@ static void lkdtm_FORTIFY_MEM_MEMBER(void) char *src; src = kmalloc(size, GFP_KERNEL); + if (!src) + return; + strscpy(src, "over ten bytes", size); size = strlen(src) + 1; diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 
bc40b940ae214..a4f75dc369292 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -120,6 +120,8 @@ #define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */ #define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */ +#define MEI_DEV_ID_WCL_P 0x4D70 /* Wildcat Lake P */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 3f9c60b579ae4..bc0fc584a8e46 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -127,6 +127,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 1d08009f2bd83..08b8276e1da93 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2876,15 +2876,15 @@ static int mmc_route_rpmb_frames(struct device *dev, u8 *req, return -ENOMEM; if (write) { - struct rpmb_frame *frm = (struct rpmb_frame *)resp; + struct rpmb_frame *resp_frm = (struct rpmb_frame *)resp; /* Send write request frame(s) */ set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 1 | MMC_CMD23_ARG_REL_WR, req, req_len); /* Send result request frame */ - memset(frm, 0, sizeof(*frm)); - frm->req_resp = cpu_to_be16(RPMB_RESULT_READ); + memset(resp_frm, 0, sizeof(*resp_frm)); + resp_frm->req_resp = cpu_to_be16(RPMB_RESULT_READ); set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp, resp_len); diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 4b19b8a16b096..2e37700ae36e7 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -945,7 +945,11 @@ static void mmc_sdio_remove(struct mmc_host *host) */ static int mmc_sdio_alive(struct mmc_host *host) { - return mmc_select_card(host->card); + if (!mmc_host_is_spi(host)) + return mmc_select_card(host->card); + else + return mmc_io_rw_direct(host->card, 0, 0, SDIO_CCCR_CCCR, 0, + NULL); } /* diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 47443fb5eb336..f42d5f9c48c1b 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -563,7 +563,7 @@ mmc_spi_setup_data_message(struct mmc_spi_host *host, bool multiple, bool write) * the next token (next data block, or STOP_TRAN). We can try to * minimize I/O ops by using a single read to collect end-of-busy. */ - if (multiple || write) { + if (write) { t = &host->early_status; memset(t, 0, sizeof(*t)); t->len = write ? 
sizeof(scratch->status) : 1; diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index be1505e8c536e..7759531ccca70 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -433,6 +433,13 @@ static const struct sdhci_cdns_drv_data sdhci_elba_drv_data = { }, }; +static const struct sdhci_cdns_drv_data sdhci_eyeq_drv_data = { + .pltfm_data = { + .ops = &sdhci_cdns_ops, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + }, +}; + static const struct sdhci_cdns_drv_data sdhci_cdns_drv_data = { .pltfm_data = { .ops = &sdhci_cdns_ops, @@ -595,6 +602,10 @@ static const struct of_device_id sdhci_cdns_match[] = { .compatible = "amd,pensando-elba-sd4hc", .data = &sdhci_elba_drv_data, }, + { + .compatible = "mobileye,eyeq-sd4hc", + .data = &sdhci_eyeq_drv_data, + }, { .compatible = "cdns,sd4hc" }, { /* sentinel */ } }; diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c index 485d5ca399513..988e33f279704 100644 --- a/drivers/most/most_usb.c +++ b/drivers/most/most_usb.c @@ -929,6 +929,10 @@ static void release_mdev(struct device *dev) { struct most_dev *mdev = to_mdev_from_dev(dev); + kfree(mdev->busy_urbs); + kfree(mdev->cap); + kfree(mdev->conf); + kfree(mdev->ep_address); kfree(mdev); } /** @@ -1093,7 +1097,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id) err_free_conf: kfree(mdev->conf); err_free_mdev: - put_device(&mdev->dev); + kfree(mdev); return ret; } @@ -1121,13 +1125,6 @@ static void hdm_disconnect(struct usb_interface *interface) if (mdev->dci) device_unregister(&mdev->dci->dev); most_deregister_interface(&mdev->iface); - - kfree(mdev->busy_urbs); - kfree(mdev->cap); - kfree(mdev->conf); - kfree(mdev->ep_address); - put_device(&mdev->dci->dev); - put_device(&mdev->dev); } static int hdm_suspend(struct usb_interface *interface, pm_message_t message) diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 5b02119d8ba23..543a0be9dc645 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -1858,7 +1858,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc) static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc) { - struct device_node *np, *nand_np; + struct device_node *np; struct device *dev = nc->dev; int ret, reg_cells; u32 val; @@ -1885,7 +1885,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc) reg_cells += val; - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { struct atmel_nand *nand; nand = atmel_nand_create(nc, nand_np, reg_cells); diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index fe5912d31beea..b0a70badf3eb7 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -876,10 +876,14 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev, if (!of_property_read_u32(np, "bank-width", &val)) { if (val == 2) { nand->options |= NAND_BUSWIDTH_16; - } else if (val != 1) { + } else if (val == 1) { + nand->options |= NAND_BUSWIDTH_AUTO; + } else { dev_err(&pdev->dev, "invalid bank-width %u\n", val); return -EINVAL; } + } else { + nand->options |= NAND_BUSWIDTH_AUTO; } if (of_property_read_bool(np, "nand-skip-bbtscan")) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 00204e42de2e7..f17a170d1be47 100644 --- a/drivers/net/bonding/bond_main.c +++ 
b/drivers/net/bonding/bond_main.c @@ -322,9 +322,9 @@ static bool bond_sk_check(struct bonding *bond) } } -static bool bond_xdp_check(struct bonding *bond) +bool bond_xdp_check(struct bonding *bond, int mode) { - switch (BOND_MODE(bond)) { + switch (mode) { case BOND_MODE_ROUNDROBIN: case BOND_MODE_ACTIVEBACKUP: return true; @@ -1930,7 +1930,7 @@ void bond_xdp_set_features(struct net_device *bond_dev) ASSERT_RTNL(); - if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) { + if (!bond_xdp_check(bond, BOND_MODE(bond)) || !bond_has_slaves(bond)) { xdp_clear_features_flag(bond_dev); return; } @@ -2951,7 +2951,7 @@ static void bond_mii_monitor(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, mii_work.work); - bool should_notify_peers = false; + bool should_notify_peers; bool commit; unsigned long delay; struct slave *slave; @@ -2963,30 +2963,33 @@ static void bond_mii_monitor(struct work_struct *work) goto re_arm; rcu_read_lock(); + should_notify_peers = bond_should_notify_peers(bond); commit = !!bond_miimon_inspect(bond); - if (bond->send_peer_notif) { - rcu_read_unlock(); - if (rtnl_trylock()) { - bond->send_peer_notif--; - rtnl_unlock(); - } - } else { - rcu_read_unlock(); - } - if (commit) { + rcu_read_unlock(); + + if (commit || bond->send_peer_notif) { /* Race avoidance with bond_close cancel of workqueue */ if (!rtnl_trylock()) { delay = 1; - should_notify_peers = false; goto re_arm; } - bond_for_each_slave(bond, slave, iter) { - bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER); + if (commit) { + bond_for_each_slave(bond, slave, iter) { + bond_commit_link_state(slave, + BOND_SLAVE_NOTIFY_LATER); + } + bond_miimon_commit(bond); + } + + if (bond->send_peer_notif) { + bond->send_peer_notif--; + if (should_notify_peers) + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, + bond->dev); } - bond_miimon_commit(bond); rtnl_unlock(); /* might sleep, hold no other locks */ } @@ -2994,13 +2997,6 @@ static void bond_mii_monitor(struct work_struct *work) re_arm: if (bond->params.miimon) queue_delayed_work(bond->wq, &bond->mii_work, delay); - - if (should_notify_peers) { - if (!rtnl_trylock()) - return; - call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); - rtnl_unlock(); - } } static int bond_upper_dev_walk(struct net_device *upper, @@ -5703,8 +5699,11 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog, ASSERT_RTNL(); - if (!bond_xdp_check(bond)) + if (!bond_xdp_check(bond, BOND_MODE(bond))) { + BOND_NL_ERR(dev, extack, + "No native XDP support for the current bonding mode"); return -EOPNOTSUPP; + } old_prog = bond->xdp_prog; bond->xdp_prog = prog; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index e27d913b487b5..28c53f1b13826 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -868,6 +868,9 @@ static bool bond_set_xfrm_features(struct bonding *bond) static int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval) { + if (bond->xdp_prog && !bond_xdp_check(bond, newval->value)) + return -EOPNOTSUPP; + if (!bond_mode_uses_arp(newval->value)) { if (bond->params.arp_interval) { netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c index bfc60eb33dc37..333ad42ea73bc 100644 --- a/drivers/net/can/bxcan.c +++ b/drivers/net/can/bxcan.c @@ -842,7 +842,7 @@ static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb, u32 id; int i, j; - if 
(can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (bxcan_tx_busy(priv)) diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index abe8dc051d94f..77d165ed0d534 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -285,7 +285,9 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], } if (data[IFLA_CAN_RESTART_MS]) { - if (!priv->do_set_mode) { + unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); + + if (restart_ms != 0 && !priv->do_set_mode) { NL_SET_ERR_MSG(extack, "Device doesn't support restart from Bus Off"); return -EOPNOTSUPP; @@ -294,7 +296,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], /* Do not allow changing restart delay while running */ if (dev->flags & IFF_UP) return -EBUSY; - priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); + priv->restart_ms = restart_ms; } if (data[IFLA_CAN_RESTART]) { diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c index c80032bc1a521..73e66f9a3781c 100644 --- a/drivers/net/can/esd/esdacc.c +++ b/drivers/net/can/esd/esdacc.c @@ -254,7 +254,7 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev) u32 acc_id; u32 acc_dlc; - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* Access core->tx_fifo_tail only once because it may be changed diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index dbcf17fb3ef25..f31a91ec7a6d0 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -812,6 +812,9 @@ static int m_can_handle_state_change(struct net_device *dev, u32 timestamp = 0; switch (new_state) { + case CAN_STATE_ERROR_ACTIVE: + cdev->can.state = CAN_STATE_ERROR_ACTIVE; + break; case CAN_STATE_ERROR_WARNING: /* error warning state */ cdev->can.can_stats.error_warning++; @@ -841,6 +844,12 @@ static int m_can_handle_state_change(struct net_device *dev, __m_can_get_berr_counter(dev, &bec); switch (new_state) { + case CAN_STATE_ERROR_ACTIVE: + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; + cf->data[1] = CAN_ERR_CRTL_ACTIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; case CAN_STATE_ERROR_WARNING: /* error warning state */ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; @@ -877,30 +886,33 @@ static int m_can_handle_state_change(struct net_device *dev, return 1; } -static int m_can_handle_state_errors(struct net_device *dev, u32 psr) +static enum can_state +m_can_state_get_by_psr(struct m_can_classdev *cdev) { - struct m_can_classdev *cdev = netdev_priv(dev); - int work_done = 0; + u32 reg_psr; - if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) { - netdev_dbg(dev, "entered error warning state\n"); - work_done += m_can_handle_state_change(dev, - CAN_STATE_ERROR_WARNING); - } + reg_psr = m_can_read(cdev, M_CAN_PSR); - if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) { - netdev_dbg(dev, "entered error passive state\n"); - work_done += m_can_handle_state_change(dev, - CAN_STATE_ERROR_PASSIVE); - } + if (reg_psr & PSR_BO) + return CAN_STATE_BUS_OFF; + if (reg_psr & PSR_EP) + return CAN_STATE_ERROR_PASSIVE; + if (reg_psr & PSR_EW) + return CAN_STATE_ERROR_WARNING; - if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "entered error bus off state\n"); - work_done += m_can_handle_state_change(dev, - CAN_STATE_BUS_OFF); - } + return CAN_STATE_ERROR_ACTIVE; +} - return work_done; +static int 
m_can_handle_state_errors(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + enum can_state new_state; + + new_state = m_can_state_get_by_psr(cdev); + if (new_state == cdev->can.state) + return 0; + + return m_can_handle_state_change(dev, new_state); } static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) @@ -1031,8 +1043,7 @@ static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus) } if (irqstatus & IR_ERR_STATE) - work_done += m_can_handle_state_errors(dev, - m_can_read(cdev, M_CAN_PSR)); + work_done += m_can_handle_state_errors(dev); if (irqstatus & IR_ERR_BUS_30X) work_done += m_can_handle_bus_errors(dev, irqstatus, @@ -1606,7 +1617,7 @@ static int m_can_start(struct net_device *dev) netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0), cdev->tx_max_coalesced_frames); - cdev->can.state = CAN_STATE_ERROR_ACTIVE; + cdev->can.state = m_can_state_get_by_psr(cdev); m_can_enable_all_interrupts(cdev); @@ -1785,6 +1796,13 @@ static void m_can_stop(struct net_device *dev) /* set the state as STOPPED */ cdev->can.state = CAN_STATE_STOPPED; + + if (cdev->ops->deinit) { + ret = cdev->ops->deinit(cdev); + if (ret) + netdev_err(dev, "failed to deinitialize: %pe\n", + ERR_PTR(ret)); + } } static int m_can_close(struct net_device *dev) @@ -2467,6 +2485,7 @@ int m_can_class_suspend(struct device *dev) { struct m_can_classdev *cdev = dev_get_drvdata(dev); struct net_device *ndev = cdev->net; + int ret = 0; if (netif_running(ndev)) { netif_stop_queue(ndev); @@ -2479,18 +2498,20 @@ int m_can_class_suspend(struct device *dev) if (cdev->pm_wake_source) { hrtimer_cancel(&cdev->hrtimer); m_can_write(cdev, M_CAN_IE, IR_RF0N); + + if (cdev->ops->deinit) + ret = cdev->ops->deinit(cdev); } else { m_can_stop(ndev); } m_can_clk_stop(cdev); + cdev->can.state = CAN_STATE_SLEEPING; } pinctrl_pm_select_sleep_state(dev); - cdev->can.state = CAN_STATE_SLEEPING; - - return 0; + return ret; } EXPORT_SYMBOL_GPL(m_can_class_suspend); @@ -2498,14 +2519,11 @@ int m_can_class_resume(struct device *dev) { struct m_can_classdev *cdev = dev_get_drvdata(dev); struct net_device *ndev = cdev->net; + int ret = 0; pinctrl_pm_select_default_state(dev); - cdev->can.state = CAN_STATE_ERROR_ACTIVE; - if (netif_running(ndev)) { - int ret; - ret = m_can_clk_start(cdev); if (ret) return ret; @@ -2518,6 +2536,12 @@ int m_can_class_resume(struct device *dev) * again. 
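The m_can hunks above replace per-flag checks with m_can_state_get_by_psr(), which decodes the status register by testing the most severe condition first. The same priority-ordered decode as a standalone sketch; the bit positions here are illustrative, not the hardware layout:

    #include <stdint.h>

    #define PSR_EW (1u << 6)  /* illustrative bit positions only */
    #define PSR_EP (1u << 5)
    #define PSR_BO (1u << 7)

    enum can_state { ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE, BUS_OFF };

    static enum can_state state_from_psr(uint32_t psr)
    {
            if (psr & PSR_BO)       /* most severe condition wins */
                    return BUS_OFF;
            if (psr & PSR_EP)
                    return ERROR_PASSIVE;
            if (psr & PSR_EW)
                    return ERROR_WARNING;
            return ERROR_ACTIVE;
    }
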
*/ cdev->active_interrupts |= IR_RF0N | IR_TEFN; + + if (cdev->ops->init) + ret = cdev->ops->init(cdev); + + cdev->can.state = m_can_state_get_by_psr(cdev); + m_can_write(cdev, M_CAN_IE, cdev->active_interrupts); } else { ret = m_can_start(ndev); @@ -2531,7 +2555,7 @@ int m_can_class_resume(struct device *dev) netif_start_queue(ndev); } - return 0; + return ret; } EXPORT_SYMBOL_GPL(m_can_class_resume); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index ef39e8e527ab6..bd4746c63af3f 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -68,6 +68,7 @@ struct m_can_ops { int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset, const void *val, size_t val_count); int (*init)(struct m_can_classdev *cdev); + int (*deinit)(struct m_can_classdev *cdev); }; struct m_can_tx_op { diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index b832566efda04..057eaa7b8b4b2 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -180,7 +180,7 @@ static void m_can_plat_remove(struct platform_device *pdev) struct m_can_classdev *mcan_class = &priv->cdev; m_can_class_unregister(mcan_class); - + pm_runtime_disable(mcan_class->dev); m_can_class_free_dev(mcan_class->net); } diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 2b7dd359f27b7..8569178b66df7 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -861,7 +861,6 @@ static int __maybe_unused rcar_can_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct rcar_can_priv *priv = netdev_priv(ndev); - u16 ctlr; int err; if (!netif_running(ndev)) @@ -873,12 +872,7 @@ static int __maybe_unused rcar_can_resume(struct device *dev) return err; } - ctlr = readw(&priv->regs->ctlr); - ctlr &= ~RCAR_CAN_CTLR_SLPM; - writew(ctlr, &priv->regs->ctlr); - ctlr &= ~RCAR_CAN_CTLR_CANM; - writew(ctlr, &priv->regs->ctlr); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + rcar_can_start(ndev); netif_device_attach(ndev); netif_start_queue(ndev); diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index aa3df0d05b853..24dd15c917228 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -732,9 +732,6 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) /* Reset Global error flags */ rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0); - /* Set the controller into appropriate mode */ - rcar_canfd_set_mode(gpriv); - /* Transition all Channels to reset mode */ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { rcar_canfd_clear_bit(gpriv->base, @@ -754,6 +751,10 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) return err; } } + + /* Set the controller into appropriate mode */ + rcar_canfd_set_mode(gpriv); + return 0; } diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c index 865a15e033a9e..12200dcfd3389 100644 --- a/drivers/net/can/rockchip/rockchip_canfd-tx.c +++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c @@ -72,7 +72,7 @@ netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev) int err; u8 i; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (!netif_subqueue_maybe_stop(priv->ndev, 0, diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 1b9501ee10deb..c9eba1d37b0eb 
100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -545,8 +545,6 @@ static int hi3110_stop(struct net_device *net) priv->force_quit = 1; free_irq(spi->irq, priv); - destroy_workqueue(priv->wq); - priv->wq = NULL; mutex_lock(&priv->hi3110_lock); @@ -771,34 +769,23 @@ static int hi3110_open(struct net_device *net) goto out_close; } - priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, - 0); - if (!priv->wq) { - ret = -ENOMEM; - goto out_free_irq; - } - INIT_WORK(&priv->tx_work, hi3110_tx_work_handler); - INIT_WORK(&priv->restart_work, hi3110_restart_work_handler); - ret = hi3110_hw_reset(spi); if (ret) - goto out_free_wq; + goto out_free_irq; ret = hi3110_setup(net); if (ret) - goto out_free_wq; + goto out_free_irq; ret = hi3110_set_normal_mode(spi); if (ret) - goto out_free_wq; + goto out_free_irq; netif_wake_queue(net); mutex_unlock(&priv->hi3110_lock); return 0; - out_free_wq: - destroy_workqueue(priv->wq); out_free_irq: free_irq(spi->irq, priv); hi3110_hw_sleep(spi); @@ -813,6 +800,7 @@ static const struct net_device_ops hi3110_netdev_ops = { .ndo_open = hi3110_open, .ndo_stop = hi3110_stop, .ndo_start_xmit = hi3110_hard_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops hi3110_ethtool_ops = { @@ -909,6 +897,15 @@ static int hi3110_can_probe(struct spi_device *spi) if (ret) goto out_clk; + priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, + 0); + if (!priv->wq) { + ret = -ENOMEM; + goto out_clk; + } + INIT_WORK(&priv->tx_work, hi3110_tx_work_handler); + INIT_WORK(&priv->restart_work, hi3110_restart_work_handler); + priv->spi = spi; mutex_init(&priv->hi3110_lock); @@ -944,6 +941,8 @@ static int hi3110_can_probe(struct spi_device *spi) return 0; error_probe: + destroy_workqueue(priv->wq); + priv->wq = NULL; hi3110_power_enable(priv->power, 0); out_clk: @@ -964,6 +963,9 @@ static void hi3110_can_remove(struct spi_device *spi) hi3110_power_enable(priv->power, 0); + destroy_workqueue(priv->wq); + priv->wq = NULL; + clk_disable_unprepare(priv->clk); free_candev(net); diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 4311c1f0eafd8..30b30fdbcae9c 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -768,6 +768,7 @@ static const struct net_device_ops sun4ican_netdev_ops = { .ndo_open = sun4ican_open, .ndo_stop = sun4ican_close, .ndo_start_xmit = sun4ican_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops sun4ican_ethtool_ops = { diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 71f24dc0a9271..4fc9bed0d2e1e 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -7,7 +7,7 @@ * * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. * Copyright (c) 2020 ETAS K.K.. All rights reserved. 
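The hi311x hunks above move the workqueue allocation from open() to probe() and its destruction to remove(), giving the restart work a device-lifetime scope. A condensed sketch of that pairing, with hypothetical names:

    #include <linux/workqueue.h>

    struct example_priv {
            struct workqueue_struct *wq;
            struct work_struct tx_work;
    };

    static void example_tx_handler(struct work_struct *work) { }

    /* probe(): allocate once for the lifetime of the device */
    static int example_probe(struct example_priv *priv)
    {
            priv->wq = alloc_workqueue("hi3110_wq",
                                       WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
            if (!priv->wq)
                    return -ENOMEM;
            INIT_WORK(&priv->tx_work, example_tx_handler);
            return 0;
    }

    /* remove(): drains pending work before freeing the workqueue */
    static void example_remove(struct example_priv *priv)
    {
            destroy_workqueue(priv->wq);
            priv->wq = NULL;
    }
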
- * Copyright (c) 2020-2022 Vincent Mailhol + * Copyright (c) 2020-2025 Vincent Mailhol */ #include @@ -1977,6 +1977,7 @@ static const struct net_device_ops es58x_netdev_ops = { .ndo_stop = es58x_stop, .ndo_start_xmit = es58x_start_xmit, .ndo_eth_ioctl = can_eth_ioctl_hwts, + .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops es58x_ethtool_ops = { diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index b6f4de375df75..fb904dc28b8ee 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -286,11 +286,6 @@ struct gs_host_frame { #define GS_MAX_RX_URBS 30 #define GS_NAPI_WEIGHT 32 -/* Maximum number of interfaces the driver supports per device. - * Current hardware only supports 3 interfaces. The future may vary. - */ -#define GS_MAX_INTF 3 - struct gs_tx_context { struct gs_can *dev; unsigned int echo_id; @@ -321,7 +316,6 @@ struct gs_can { /* usb interface struct */ struct gs_usb { - struct gs_can *canch[GS_MAX_INTF]; struct usb_anchor rx_submitted; struct usb_device *udev; @@ -333,9 +327,11 @@ struct gs_usb { unsigned int hf_size_rx; u8 active_channels; + u8 channel_cnt; unsigned int pipe_in; unsigned int pipe_out; + struct gs_can *canch[] __counted_by(channel_cnt); }; /* 'allocate' a tx context. @@ -596,7 +592,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) } /* device reports out of range channel id */ - if (hf->channel >= GS_MAX_INTF) + if (hf->channel >= parent->channel_cnt) goto device_detach; dev = parent->canch[hf->channel]; @@ -696,7 +692,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) /* USB failure take down all interfaces */ if (rc == -ENODEV) { device_detach: - for (rc = 0; rc < GS_MAX_INTF; rc++) { + for (rc = 0; rc < parent->channel_cnt; rc++) { if (parent->canch[rc]) netif_device_detach(parent->canch[rc]->netdev); } @@ -1246,6 +1242,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */ netdev->dev_id = channel; + netdev->dev_port = channel; /* dev setup */ strcpy(dev->bt_const.name, KBUILD_MODNAME); @@ -1457,17 +1454,19 @@ static int gs_usb_probe(struct usb_interface *intf, icount = dconf.icount + 1; dev_info(&intf->dev, "Configuring for %u interfaces\n", icount); - if (icount > GS_MAX_INTF) { + if (icount > type_max(parent->channel_cnt)) { dev_err(&intf->dev, "Driver cannot handle more that %u CAN interfaces\n", - GS_MAX_INTF); + type_max(parent->channel_cnt)); return -EINVAL; } - parent = kzalloc(sizeof(*parent), GFP_KERNEL); + parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL); if (!parent) return -ENOMEM; + parent->channel_cnt = icount; + init_usb_anchor(&parent->rx_submitted); usb_set_intfdata(intf, parent); @@ -1528,7 +1527,7 @@ static void gs_usb_disconnect(struct usb_interface *intf) return; } - for (i = 0; i < GS_MAX_INTF; i++) + for (i = 0; i < parent->channel_cnt; i++) if (parent->canch[i]) gs_destroy_candev(parent->canch[i]); diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 41c0a1c399bf3..1f9b915094e64 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -761,6 +761,7 @@ static const struct net_device_ops mcba_netdev_ops = { .ndo_open = mcba_usb_open, .ndo_stop = mcba_usb_close, .ndo_start_xmit = mcba_usb_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops mcba_ethtool_ops = { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 
59f7cd8ceb397..69c44f675ff1c 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -111,7 +111,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now) u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1; if (time_ref->ts_dev_2 < time_ref->ts_dev_1) - delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1; + delta_ts &= (1ULL << time_ref->adapter->ts_used_bits) - 1; time_ref->ts_total += delta_ts; } diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index fcd4505f49252..b1f18a840da77 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -685,18 +685,27 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add) return 0; } -static int gswip_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phydev) +static int gswip_port_setup(struct dsa_switch *ds, int port) { struct gswip_priv *priv = ds->priv; int err; if (!dsa_is_cpu_port(ds, port)) { - u32 mdio_phy = 0; - err = gswip_add_single_port_br(priv, port, true); if (err) return err; + } + + return 0; +} + +static int gswip_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct gswip_priv *priv = ds->priv; + + if (!dsa_is_cpu_port(ds, port)) { + u32 mdio_phy = 0; if (phydev) mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; @@ -1359,8 +1368,9 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port, int i; int err; + /* Operation not supported on the CPU port, don't throw errors */ if (!bridge) - return -EINVAL; + return 0; for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { if (priv->vlans[i].bridge == bridge) { @@ -1829,6 +1839,7 @@ static const struct phylink_mac_ops gswip_phylink_mac_ops = { static const struct dsa_switch_ops gswip_xrx200_switch_ops = { .get_tag_protocol = gswip_get_tag_protocol, .setup = gswip_setup, + .port_setup = gswip_port_setup, .port_enable = gswip_port_enable, .port_disable = gswip_port_disable, .port_bridge_join = gswip_port_bridge_join, diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 60fb35ec4b15a..0b2e257b591f0 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -869,7 +869,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev) static u32 ena_get_rxfh_key_size(struct net_device *netdev) { - return ENA_HASH_KEY_SIZE; + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_rss *rss = &adapter->ena_dev->rss; + + return rss->hash_key ? 
ENA_HASH_KEY_SIZE : 0; } static int ena_indirection_table_set(struct ena_adapter *adapter, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 8bc49259d71af..32a6d52614242 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1172,7 +1172,6 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata) static int xgbe_phy_reset(struct xgbe_prv_data *pdata) { - pdata->phy_link = -1; pdata->phy_speed = SPEED_UNKNOWN; return pdata->phy_if.phy_reset(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index ed76a8df6ec6e..75e9cb3fc7aa6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1664,6 +1664,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_FULL; } + pdata->phy_link = 0; pdata->phy.link = 0; pdata->phy.pause_autoneg = pdata->pause_autoneg; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index d2ca90407cce7..8057350236c5e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -244,7 +244,7 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions, offset < offset_of_ip6_daddr + 16) { actions->nat.src_xlate = false; idx = (offset - offset_of_ip6_daddr) / 4; - actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val); + actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val); } else { netdev_err(bp->dev, "%s: IPv6_hdr: Invalid pedit field\n", diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 717e110d23c91..dc170feee8ad7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -5803,7 +5803,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) u32 current_speed = SPEED_UNKNOWN; u8 current_duplex = DUPLEX_UNKNOWN; bool current_link_up = false; - u32 local_adv, remote_adv, sgsr; + u32 local_adv = 0, remote_adv = 0, sgsr; if ((tg3_asic_rev(tp) == ASIC_REV_5719 || tg3_asic_rev(tp) == ASIC_REV_5720) && @@ -5944,9 +5944,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) else current_duplex = DUPLEX_HALF; - local_adv = 0; - remote_adv = 0; - if (bmcr & BMCR_ANENABLE) { u32 common; diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 2c1b551e14423..7c9658a4ec4b5 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -498,25 +498,34 @@ static int alloc_list(struct net_device *dev) for (i = 0; i < RX_RING_SIZE; i++) { /* Allocated fixed size of skbuff */ struct sk_buff *skb; + dma_addr_t addr; skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); np->rx_skbuff[i] = skb; - if (!skb) { - free_list(dev); - return -ENOMEM; - } + if (!skb) + goto err_free_list; + + addr = dma_map_single(&np->pdev->dev, skb->data, + np->rx_buf_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(&np->pdev->dev, addr)) + goto err_kfree_skb; np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma + ((i + 1) % RX_RING_SIZE) * sizeof(struct netdev_desc)); /* Rubicon now supports 40 bits of addressing space. 
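The dl2k alloc_list() change above checks dma_map_single() with dma_mapping_error() before publishing the address into the descriptor ring, and unwinds on failure. The shape of the check as a sketch, with hypothetical names:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>
    #include <linux/types.h>

    /* Map an rx buffer and only publish the address on success. */
    static int map_rx_buf(struct device *dev, struct sk_buff *skb,
                          size_t buf_sz, __le64 *fraginfo)
    {
            dma_addr_t addr = dma_map_single(dev, skb->data, buf_sz,
                                             DMA_FROM_DEVICE);

            if (dma_mapping_error(dev, addr))  /* mapping can fail; never assume */
                    return -ENOMEM;

            *fraginfo = cpu_to_le64(addr);     /* descriptor sees a valid address */
            return 0;
    }
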
*/ - np->rx_ring[i].fraginfo = - cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data, - np->rx_buf_sz, DMA_FROM_DEVICE)); + np->rx_ring[i].fraginfo = cpu_to_le64(addr); np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); } return 0; + +err_kfree_skb: + dev_kfree_skb(np->rx_skbuff[i]); + np->rx_skbuff[i] = NULL; +err_free_list: + free_list(dev); + return -ENOMEM; } static void rio_hw_init(struct net_device *dev) @@ -953,15 +962,18 @@ receive_packet (struct net_device *dev) } else { struct sk_buff *skb; + skb = NULL; /* Small skbuffs for short packets */ - if (pkt_len > copy_thresh) { + if (pkt_len <= copy_thresh) + skb = netdev_alloc_skb_ip_align(dev, pkt_len); + if (!skb) { dma_unmap_single(&np->pdev->dev, desc_to_dma(desc), np->rx_buf_sz, DMA_FROM_DEVICE); skb_put (skb = np->rx_skbuff[entry], pkt_len); np->rx_skbuff[entry] = NULL; - } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { + } else { dma_sync_single_for_cpu(&np->pdev->dev, desc_to_dma(desc), np->rx_buf_sz, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index c744e10e64033..f56a14e09d4a3 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1077,8 +1077,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, dma_addr_t addr; buffer_start = skb->data - dpaa2_eth_needed_headroom(skb); - aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, - DPAA2_ETH_TX_BUF_ALIGN); + aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN); if (aligned_start >= skb->head) buffer_start = aligned_start; else diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index d8272b7a55fcb..749b65aab14a9 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1246,6 +1246,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, /* next descriptor to process */ i = rx_ring->next_to_clean; + enetc_lock_mdio(); + while (likely(rx_frm_cnt < work_limit)) { union enetc_rx_bd *rxbd; struct sk_buff *skb; @@ -1281,7 +1283,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, rx_byte_cnt += skb->len + ETH_HLEN; rx_frm_cnt++; + enetc_unlock_mdio(); napi_gro_receive(napi, skb); + enetc_lock_mdio(); } rx_ring->next_to_clean = i; @@ -1289,6 +1293,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, rx_ring->stats.packets += rx_frm_cnt; rx_ring->stats.bytes += rx_byte_cnt; + enetc_unlock_mdio(); + return rx_frm_cnt; } @@ -1598,6 +1604,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, /* next descriptor to process */ i = rx_ring->next_to_clean; + enetc_lock_mdio(); + while (likely(rx_frm_cnt < work_limit)) { union enetc_rx_bd *rxbd, *orig_rxbd; int orig_i, orig_cleaned_cnt; @@ -1657,7 +1665,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, if (unlikely(!skb)) goto out; + enetc_unlock_mdio(); napi_gro_receive(napi, skb); + enetc_lock_mdio(); break; case XDP_TX: tx_ring = priv->xdp_tx_ring[rx_ring->index]; @@ -1692,7 +1702,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, } break; case XDP_REDIRECT: + enetc_unlock_mdio(); err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); + enetc_lock_mdio(); if (unlikely(err)) { enetc_xdp_drop(rx_ring, orig_i, i); rx_ring->stats.xdp_redirect_failures++; @@ -1712,8 +1724,11 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, rx_ring->stats.packets += 
rx_frm_cnt; rx_ring->stats.bytes += rx_byte_cnt; - if (xdp_redirect_frm_cnt) + if (xdp_redirect_frm_cnt) { + enetc_unlock_mdio(); xdp_do_flush(); + enetc_lock_mdio(); + } if (xdp_tx_frm_cnt) enetc_update_tx_ring_tail(tx_ring); @@ -1722,6 +1737,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - rx_ring->xdp.xdp_tx_in_flight); + enetc_unlock_mdio(); + return rx_frm_cnt; } @@ -1740,6 +1757,7 @@ static int enetc_poll(struct napi_struct *napi, int budget) for (i = 0; i < v->count_tx_rings; i++) if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) complete = false; + enetc_unlock_mdio(); prog = rx_ring->xdp.prog; if (prog) @@ -1751,10 +1769,8 @@ static int enetc_poll(struct napi_struct *napi, int budget) if (work_done) v->rx_napi_work = true; - if (!complete) { - enetc_unlock_mdio(); + if (!complete) return budget; - } napi_complete_done(napi, work_done); @@ -1763,6 +1779,7 @@ static int enetc_poll(struct napi_struct *napi, int budget) v->rx_napi_work = false; + enetc_lock_mdio(); /* enable interrupts */ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index fb7d98d577839..bf72b2825fa68 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -41,7 +41,7 @@ struct enetc_tx_swbd { }; #define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE -#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */ +#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1) #define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */ #define ENETC_RXB_DMA_SIZE \ (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 0bd814251d56e..d144494f97e91 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -131,7 +131,7 @@ static const struct fec_devinfo fec_mvf600_info = { FEC_QUIRK_HAS_MDIO_C45, }; -static const struct fec_devinfo fec_imx6x_info = { +static const struct fec_devinfo fec_imx6sx_info = { .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | @@ -196,7 +196,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, }, { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, }, { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, }, - { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, }, { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, }, { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, }, { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, }, diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 026f7270a54de..fb5d12a87872e 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -479,10 +479,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) "missing 'reg' property in node %pOF\n", tbi); err = -EBUSY; + of_node_put(tbi); goto error; } set_tbipa(*prop, pdev, data->get_tbipa, priv->map, &res); + of_node_put(tbi); } } diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index b22bb0ae9b9d1..b8de97343ad3e 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1277,7 +1277,8 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); -int i40e_count_filters(struct i40e_vsi *vsi); +int i40e_count_all_filters(struct i40e_vsi *vsi); +int i40e_count_active_filters(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr); void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); static inline bool i40e_is_sw_dcb(struct i40e_pf *pf) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 037c1a0cbd6a8..eae5923104f79 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1241,12 +1241,30 @@ void i40e_update_stats(struct i40e_vsi *vsi) } /** - * i40e_count_filters - counts VSI mac filters + * i40e_count_all_filters - counts VSI MAC filters * @vsi: the VSI to be searched * - * Returns count of mac filters - **/ -int i40e_count_filters(struct i40e_vsi *vsi) + * Return: count of MAC filters in any state. + */ +int i40e_count_all_filters(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + struct hlist_node *h; + int bkt, cnt = 0; + + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) + cnt++; + + return cnt; +} + +/** + * i40e_count_active_filters - counts VSI MAC filters + * @vsi: the VSI to be searched + * + * Return: count of active MAC filters. + */ +int i40e_count_active_filters(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; struct hlist_node *h; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 97f32a0c68d09..646e394f51903 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -448,7 +448,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | - (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); + FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx); wr32(hw, reg_idx, reg); } @@ -653,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, /* only set the required fields */ tx_ctx.base = info->dma_ring_addr / 128; + + /* ring_len has to be multiple of 8 */ + if (!IS_ALIGNED(info->ring_len, 8) || + info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) { + ret = -EINVAL; + goto error_context; + } tx_ctx.qlen = info->ring_len; tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); tx_ctx.rdylist_act = 0; @@ -716,6 +723,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, /* only set the required fields */ rx_ctx.base = info->dma_ring_addr / 128; + + /* ring_len has to be multiple of 32 */ + if (!IS_ALIGNED(info->ring_len, 32) || + info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) { + ret = -EINVAL; + goto error_param; + } rx_ctx.qlen = info->ring_len; if (info->splithdr_enabled) { @@ -1453,6 +1467,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) * functions that may still be running at this point. 
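The i40e_main.c hunk above shows the full body of i40e_count_all_filters() but only the opening of i40e_count_active_filters(); the rest of that function lies outside the hunk context. Going by the Return: description, a body consistent with the split could look like the sketch below; the exact state predicate (which i40e_filter_state values count as "active") is an assumption, not something this diff shows.

int i40e_count_active_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt, cnt = 0;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		/* assumed: filters queued for removal are not "active" */
		if (f->state == I40E_FILTER_NEW ||
		    f->state == I40E_FILTER_ACTIVE)
			++cnt;

	return cnt;
}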
*/ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); + clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states); /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. @@ -2119,7 +2134,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) size_t len = 0; int ret; - if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) { + i40e_sync_vf_state(vf, I40E_VF_STATE_INIT); + + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) || + test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) { aq_ret = -EINVAL; goto err; } @@ -2222,6 +2240,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vf->default_lan_addr.addr); } set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); + set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states); err: /* send the response back to the VF */ @@ -2384,7 +2403,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) } if (vf->adq_enabled) { - if (idx >= ARRAY_SIZE(vf->ch)) { + if (idx >= vf->num_tc) { aq_ret = -ENODEV; goto error_param; } @@ -2405,7 +2424,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) * to its appropriate VSIs based on TC mapping */ if (vf->adq_enabled) { - if (idx >= ARRAY_SIZE(vf->ch)) { + if (idx >= vf->num_tc) { aq_ret = -ENODEV; goto error_param; } @@ -2455,8 +2474,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, queue_id; for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { - if (vf->adq_enabled) { - vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; + u16 idx = vsi_queue_id / I40E_MAX_VF_VSI; + + if (vf->adq_enabled && idx < vf->num_tc) { + vsi_id = vf->ch[idx].vsi_id; queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); } else { queue_id = vsi_queue_id; @@ -2844,24 +2865,6 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) (u8 *)&stats, sizeof(stats)); } -/** - * i40e_can_vf_change_mac - * @vf: pointer to the VF info - * - * Return true if the VF is allowed to change its MAC filters, false otherwise - */ -static bool i40e_can_vf_change_mac(struct i40e_vf *vf) -{ - /* If the VF MAC address has been set administratively (via the - * ndo_set_vf_mac command), then deny permission to the VF to - * add/delete unicast MAC addresses, unless the VF is trusted - */ - if (vf->pf_set_mac && !vf->trusted) - return false; - - return true; -} - #define I40E_MAX_MACVLAN_PER_HW 3072 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ (num_ports)) @@ -2900,8 +2903,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; struct i40e_hw *hw = &pf->hw; - int mac2add_cnt = 0; - int i; + int i, mac_add_max, mac_add_cnt = 0; + bool vf_trusted; + + vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; @@ -2921,9 +2926,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, * The VF may request to set the MAC address filter already * assigned to it so do not return an error in that case. 
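With i40e_can_vf_change_mac() removed, the policy it encoded moves inline into i40e_check_vf_permission() in the next hunk. As a reading aid only, the new condition is equivalent to this hypothetical helper (the name and factoring are illustrative, not part of the patch):

/* Equivalent restatement of the inline check installed below */
static bool i40e_vf_may_add_mac(const struct i40e_vf *vf, const u8 *addr,
				bool vf_trusted)
{
	/* trusted VFs, and VFs whose MAC the PF never pinned via
	 * ndo_set_vf_mac, may add any address
	 */
	if (vf_trusted || !vf->pf_set_mac)
		return true;

	/* otherwise only multicast entries or the pinned address */
	return is_multicast_ether_addr(addr) ||
	       ether_addr_equal(addr, vf->default_lan_addr.addr);
}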
*/ - if (!i40e_can_vf_change_mac(vf) && - !is_multicast_ether_addr(addr) && - !ether_addr_equal(addr, vf->default_lan_addr.addr)) { + if (!vf_trusted && !is_multicast_ether_addr(addr) && + vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); return -EPERM; @@ -2932,29 +2936,33 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, /*count filters that really will be added*/ f = i40e_find_mac(vsi, addr); if (!f) - ++mac2add_cnt; + ++mac_add_cnt; } /* If this VF is not privileged, then we can't add more than a limited - * number of addresses. Check to make sure that the additions do not - * push us over the limit. - */ - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { - if ((i40e_count_filters(vsi) + mac2add_cnt) > - I40E_VC_MAX_MAC_ADDR_PER_VF) { - dev_err(&pf->pdev->dev, - "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); - return -EPERM; - } - /* If this VF is trusted, it can use more resources than untrusted. + * number of addresses. + * + * If this VF is trusted, it can use more resources than untrusted. * However to ensure that every trusted VF has appropriate number of * resources, divide whole pool of resources per port and then across * all VFs. */ - } else { - if ((i40e_count_filters(vsi) + mac2add_cnt) > - I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, - hw->num_ports)) { + if (!vf_trusted) + mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF; + else + mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports); + + /* A VF can replace all its filters in one step; in this case, mac_add_max + * will be added as active and another mac_add_max will be in + * a to-be-removed state. Account for that. 
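A worked instance of the accounting described in that comment: take mac_add_max = 12 (the value of I40E_VC_MAX_MAC_ADDR_PER_VF in current sources; the constant's value is not part of this hunk). A VF replacing its entire set in one request submits 12 new addresses while its 12 old ones still sit in the hash awaiting removal, so the table transiently holds 24 = 2 * mac_add_max entries even though only 12 are active. That is exactly why the check in the next hunk bounds active + new by mac_add_max but all-states + new by twice that.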
+ */ + if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max || + (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) { + if (!vf_trusted) { + dev_err(&pf->pdev->dev, + "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); + return -EPERM; + } else { dev_err(&pf->pdev->dev, "Cannot add more MAC addresses, trusted VF exhausted its resources\n"); return -EPERM; @@ -3589,7 +3597,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf, /* action_meta is TC number here to which the filter is applied */ if (!tc_filter->action_meta || - tc_filter->action_meta > vf->num_tc) { + tc_filter->action_meta >= vf->num_tc) { dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", vf->vf_id, tc_filter->action_meta); goto err; @@ -3887,6 +3895,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) aq_ret); } +#define I40E_MAX_VF_CLOUD_FILTER 0xFF00 + /** * i40e_vc_add_cloud_filter * @vf: pointer to the VF info @@ -3926,6 +3936,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) goto err_out; } + if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) { + dev_warn(&pf->pdev->dev, + "VF %d: Max number of filters reached, can't apply cloud filter\n", + vf->vf_id); + aq_ret = -ENOSPC; + goto err_out; + } + cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); if (!cfilter) { aq_ret = -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 66f95e2f3146a..e0e797fea138a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -41,7 +41,8 @@ enum i40e_vf_states { I40E_VF_STATE_MC_PROMISC, I40E_VF_STATE_UC_PROMISC, I40E_VF_STATE_PRE_ENABLE, - I40E_VF_STATE_RESETTING + I40E_VF_STATE_RESETTING, + I40E_VF_STATE_RESOURCES_LOADED, }; /* VF capabilities */ diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c index 10285995c9edd..cc4798026182e 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.c +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -98,19 +98,21 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev) index = ice_adapter_xa_index(pdev); scoped_guard(mutex, &ice_adapters_mutex) { - err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); - if (err == -EBUSY) { - adapter = xa_load(&ice_adapters, index); + adapter = xa_load(&ice_adapters, index); + if (adapter) { refcount_inc(&adapter->refcount); WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev)); return adapter; } + err = xa_reserve(&ice_adapters, index, GFP_KERNEL); if (err) return ERR_PTR(err); adapter = ice_adapter_new(pdev); - if (!adapter) + if (!adapter) { + xa_release(&ice_adapters, index); return ERR_PTR(-ENOMEM); + } xa_store(&ice_adapters, index, adapter, GFP_KERNEL); } return adapter; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 4086a6ef352e5..087a3077d5481 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3216,18 +3216,14 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) /* get the Rx desc from Rx queue based on 'next_to_clean' */ rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb; - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc - */ - dma_rmb(); - /* if the descriptor isn't done, no work yet to do */ gen_id = 
le16_get_bits(rx_desc->pktlen_gen_bufq_id, VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M); - if (idpf_queue_has(GEN_CHK, rxq) != gen_id) break; + dma_rmb(); + rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M, rx_desc->rxdid_ucast); if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) { diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index f27a8cf3816db..d1f374da00981 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -727,9 +727,9 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter) /* If post failed clear the only buffer we supplied */ if (post_err) { if (dma_mem) - dmam_free_coherent(&adapter->pdev->dev, - dma_mem->size, dma_mem->va, - dma_mem->pa); + dma_free_coherent(&adapter->pdev->dev, + dma_mem->size, dma_mem->va, + dma_mem->pa); break; } diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 5f08779c0e4e3..e177d1d58696a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #ifndef _IXGBEVF_DEFINES_H_ #define _IXGBEVF_DEFINES_H_ @@ -16,6 +16,9 @@ #define IXGBE_DEV_ID_X550_VF_HV 0x1564 #define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 +#define IXGBE_DEV_ID_E610_VF 0x57AD +#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF + #define IXGBE_VF_IRQ_CLEAR_MASK 7 #define IXGBE_VF_MAX_TX_QUEUES 8 #define IXGBE_VF_MAX_RX_QUEUES 8 @@ -25,6 +28,7 @@ /* Link speed */ typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_UNKNOWN 0 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 #define IXGBE_LINK_SPEED_100_FULL 0x0008 diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index f804b35d79c72..83b3243f172c7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -271,6 +271,9 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, adapter = netdev_priv(dev); ipsec = adapter->ipsec; + if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC)) + return -EOPNOTSUPP; + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload"); return -EINVAL; @@ -400,6 +403,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) adapter = netdev_priv(dev); ipsec = adapter->ipsec; + if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC)) + return; + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; @@ -628,6 +634,10 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) size_t size; switch (adapter->hw.api_version) { + case ixgbe_mbox_api_17: + if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC)) + return; + break; case ixgbe_mbox_api_14: break; default: diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 130cb868774c4..f31068e24e867 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. 
*/ #ifndef _IXGBEVF_H_ #define _IXGBEVF_H_ @@ -366,6 +366,13 @@ struct ixgbevf_adapter { /* Interrupt Throttle Rate */ u32 eitr_param; + u32 pf_features; +#define IXGBEVF_PF_SUP_IPSEC BIT(0) +#define IXGBEVF_PF_SUP_ESX_MBX BIT(1) + +#define IXGBEVF_SUPPORTED_FEATURES (IXGBEVF_PF_SUP_IPSEC | \ + IXGBEVF_PF_SUP_ESX_MBX) + struct ixgbevf_hw_stats stats; unsigned long state; @@ -418,6 +425,8 @@ enum ixgbevf_boards { board_X550EM_x_vf, board_X550EM_x_vf_hv, board_x550em_a_vf, + board_e610_vf, + board_e610_vf_hv, }; enum ixgbevf_xcast_modes { @@ -434,11 +443,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy; extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info; +extern const struct ixgbevf_info ixgbevf_e610_vf_info; extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info; +extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info; extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops; /* needed by ethtool.c */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 149911e3002a2..a2a7cb6d8ea18 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code @@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2018 Intel Corporation."; + "Copyright (c) 2009 - 2024 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info, + [board_e610_vf] = &ixgbevf_e610_vf_info, + [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf }, + {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID, + IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf}, /* required last entry */ {0, } }; @@ -2269,10 +2274,36 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; } +/** + * ixgbevf_set_features - Set features supported by PF + * @adapter: pointer to the adapter struct + * + * Negotiate supported features with the PF and then set pf_features accordingly. 
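One easy-to-miss detail in the ixgbevf_pci_tbl hunk above: the PCI core scans a driver's ID table in order and binds on the first match, so the subdevice-qualified E610 entry has to come before the catch-all entry for the same device ID. With the order reversed, every E610 VF, Hyper-V or not, would bind as board_e610_vf and the Hyper-V variant would never be selected. Schematically:

	/* specific subdevice match first ... */
	{PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
			 IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
	/* ... then the catch-all for the same device ID */
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},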
+ */ +static void ixgbevf_set_features(struct ixgbevf_adapter *adapter) +{ + u32 *pf_features = &adapter->pf_features; + struct ixgbe_hw *hw = &adapter->hw; + int err; + + err = hw->mac.ops.negotiate_features(hw, pf_features); + if (err && err != -EOPNOTSUPP) + netdev_dbg(adapter->netdev, + "PF feature negotiation failed.\n"); + + /* Also cover the pre-API 1.7 cases */ + if (hw->api_version == ixgbe_mbox_api_14) + *pf_features |= IXGBEVF_PF_SUP_IPSEC; + else if (hw->api_version == ixgbe_mbox_api_15) + *pf_features |= IXGBEVF_PF_SUP_ESX_MBX; +} + static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; static const int api[] = { + ixgbe_mbox_api_17, + ixgbe_mbox_api_16, ixgbe_mbox_api_15, ixgbe_mbox_api_14, ixgbe_mbox_api_13, @@ -2292,7 +2323,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) idx++; } - if (hw->api_version >= ixgbe_mbox_api_15) { + ixgbevf_set_features(adapter); + + if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) { hw->mbx.ops.init_params(hw); memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, sizeof(struct ixgbe_mbx_operations)); @@ -2649,6 +2682,8 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: case ixgbe_mbox_api_15: + case ixgbe_mbox_api_16: + case ixgbe_mbox_api_17: if (adapter->xdp_prog && hw->mac.max_tx_queues == rss) rss = rss > 3 ? 2 : 1; @@ -4643,6 +4678,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: case ixgbe_mbox_api_15: + case ixgbe_mbox_api_16: + case ixgbe_mbox_api_17: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; @@ -4693,6 +4730,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mac_X540_vf: dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); break; + case ixgbe_mac_e610_vf: + dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n"); + break; case ixgbe_mac_82599_vf: default: dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 835bbcc5cc8e6..a8ed23ee66aa8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -66,6 +66,8 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */ + ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */ + ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -102,6 +104,12 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ +/* mailbox API, version 1.6 VF requests */ +#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */ + +/* mailbox API, version 1.7 VF requests */ +#define IXGBE_VF_FEATURES_NEGOTIATE 0x12 /* get features supported by PF */ + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 1641d00d8ed35..65257107dfc8a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ 
b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #include "vf.h" #include "ixgbevf.h" @@ -313,6 +313,8 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) * is not supported for this device type. */ switch (hw->api_version) { + case ixgbe_mbox_api_17: + case ixgbe_mbox_api_16: case ixgbe_mbox_api_15: case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: @@ -382,6 +384,8 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) * or if the operation is not supported for this device type. */ switch (hw->api_version) { + case ixgbe_mbox_api_17: + case ixgbe_mbox_api_16: case ixgbe_mbox_api_15: case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: @@ -552,6 +556,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: case ixgbe_mbox_api_15: + case ixgbe_mbox_api_16: + case ixgbe_mbox_api_17: break; default: return -EOPNOTSUPP; @@ -624,6 +630,85 @@ static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state) return -EOPNOTSUPP; } +/** + * ixgbevf_get_pf_link_state - Get PF's link status + * @hw: pointer to the HW structure + * @speed: link speed + * @link_up: indicate if link is up/down + * + * Ask PF to provide link_up state and speed of the link. + * + * Return: IXGBE_ERR_MBX in the case of mailbox error, + * -EOPNOTSUPP if the op is not supported or 0 on success. + */ +static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + u32 msgbuf[3] = {}; + int err; + + switch (hw->api_version) { + case ixgbe_mbox_api_16: + case ixgbe_mbox_api_17: + break; + default: + return -EOPNOTSUPP; + } + + msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE; + + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + ARRAY_SIZE(msgbuf)); + if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) { + err = IXGBE_ERR_MBX; + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* No need to set @link_up to false as it will be done by + * ixgbe_check_mac_link_vf(). + */ + } else { + *speed = msgbuf[1]; + *link_up = msgbuf[2]; + } + + return err; +} + +/** + * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver + * @hw: pointer to the HW structure + * @pf_features: bitmask of features supported by PF + * + * Return: IXGBE_ERR_MBX in the case of mailbox error, + * -EOPNOTSUPP if the op is not supported or 0 on success. + */ +static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features) +{ + u32 msgbuf[2] = {}; + int err; + + switch (hw->api_version) { + case ixgbe_mbox_api_17: + break; + default: + return -EOPNOTSUPP; + } + + msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE; + msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES; + + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + ARRAY_SIZE(msgbuf)); + + if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) { + err = IXGBE_ERR_MBX; + *pf_features = 0x0; + } else { + *pf_features = msgbuf[1]; + } + + return err; +} + /** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure @@ -658,6 +743,58 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, return err; } +/** + * ixgbe_read_vflinks - Read VFLINKS register + * @hw: pointer to the HW structure + * @speed: link speed + * @link_up: indicate if link is up/down + * + * Get linkup status and link speed from the VFLINKS register. 
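For orientation, the IXGBE_VF_GET_PF_LINK_STATE exchange implemented above is purely positional: three u32 mailbox words, with word 0 echoing the opcode plus the ACK/NACK flags and the payload in words 1 and 2. An illustrative overlay of the reply as the VF reads it (the struct and its field names are invented for clarity, not taken from the driver or the mailbox spec):

struct vf_pf_link_state_reply {		/* hypothetical view of msgbuf[3] */
	u32 opcode;	/* IXGBE_VF_GET_PF_LINK_STATE | IXGBE_VT_MSGTYPE_* */
	u32 speed;	/* an ixgbe_link_speed, e.g. IXGBE_LINK_SPEED_10GB_FULL */
	u32 link_up;	/* non-zero when the PF reports link up */
};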
+ */ +static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + /* if link status is down no point in checking to see if PF is up */ + if (!(vflinks & IXGBE_LINKS_UP)) { + *link_up = false; + return; + } + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (hw->mac.type == ixgbe_mac_82599_vf) { + for (int i = 0; i < 5; i++) { + udelay(100); + vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(vflinks & IXGBE_LINKS_UP)) { + *link_up = false; + return; + } + } + } + + /* We reached this point so there's link */ + *link_up = true; + + switch (vflinks & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } +} + /** * ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub. * @hw: unused @@ -702,10 +839,10 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, bool *link_up, bool autoneg_wait_to_complete) { + struct ixgbevf_adapter *adapter = hw->back; struct ixgbe_mbx_info *mbx = &hw->mbx; struct ixgbe_mac_info *mac = &hw->mac; s32 ret_val = 0; - u32 links_reg; u32 in_msg = 0; /* If we were hit with a reset drop the link */ @@ -715,43 +852,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, if (!mac->get_link_status) goto out; - /* if link status is down no point in checking to see if pf is up */ - links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - if (!(links_reg & IXGBE_LINKS_UP)) - goto out; - - /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs - * before the link status is correct - */ - if (mac->type == ixgbe_mac_82599_vf) { - int i; - - for (i = 0; i < 5; i++) { - udelay(100); - links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - - if (!(links_reg & IXGBE_LINKS_UP)) - goto out; - } - } - - switch (links_reg & IXGBE_LINKS_SPEED_82599) { - case IXGBE_LINKS_SPEED_10G_82599: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - break; - case IXGBE_LINKS_SPEED_1G_82599: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - case IXGBE_LINKS_SPEED_100_82599: - *speed = IXGBE_LINK_SPEED_100_FULL; - break; + if (hw->mac.type == ixgbe_mac_e610_vf) { + ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up); + if (ret_val) + goto out; + } else { + ixgbe_read_vflinks(hw, speed, link_up); + if (*link_up == false) + goto out; } /* if the read failed it could just be a mailbox collision, best wait * until we are called again and don't report an error */ if (mbx->ops.read(hw, &in_msg, 1)) { - if (hw->api_version >= ixgbe_mbox_api_15) + if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) mac->get_link_status = false; goto out; } @@ -951,6 +1066,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: case ixgbe_mbox_api_15: + case ixgbe_mbox_api_16: + case ixgbe_mbox_api_17: break; default: return 0; @@ -1005,6 +1122,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .setup_link = ixgbevf_setup_mac_link_vf, .check_link = ixgbevf_check_mac_link_vf, .negotiate_api_version = ixgbevf_negotiate_api_version_vf, + .negotiate_features = ixgbevf_negotiate_features_vf, .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, .update_xcast_mode = 
ixgbevf_update_xcast_mode, @@ -1076,3 +1194,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = { .mac = ixgbe_mac_x550em_a_vf, .mac_ops = &ixgbevf_mac_ops, }; + +const struct ixgbevf_info ixgbevf_e610_vf_info = { + .mac = ixgbe_mac_e610_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_e610_vf_hv_info = { + .mac = ixgbe_mac_e610_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index b4eef5b6c172b..4f19b8900c29a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 1999 - 2018 Intel Corporation. */ +/* Copyright(c) 1999 - 2024 Intel Corporation. */ #ifndef __IXGBE_VF_H__ #define __IXGBE_VF_H__ @@ -26,6 +26,7 @@ struct ixgbe_mac_operations { s32 (*stop_adapter)(struct ixgbe_hw *); s32 (*get_bus_info)(struct ixgbe_hw *); s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api); + int (*negotiate_features)(struct ixgbe_hw *hw, u32 *pf_features); /* Link */ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); @@ -54,6 +55,8 @@ enum ixgbe_mac_type { ixgbe_mac_X550_vf, ixgbe_mac_X550EM_x_vf, ixgbe_mac_x550em_a_vf, + ixgbe_mac_e610, + ixgbe_mac_e610_vf, ixgbe_num_macs }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 971993586fb49..ca74bf6843369 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -21,8 +21,7 @@ #include "rvu.h" #include "lmac_common.h" -#define DRV_NAME "Marvell-CGX/RPM" -#define DRV_STRING "Marvell CGX/RPM Driver" +#define DRV_NAME "Marvell-CGX-RPM" #define CGX_RX_STAT_GLOBAL_INDEX 9 diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index e63cc1eb6d891..ed041c3f714f2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -1319,7 +1319,6 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, free_leaf: otx2_tc_del_from_flow_list(flow_cfg, new_node); - kfree_rcu(new_node, rcu); if (new_node->is_act_police) { mutex_lock(&nic->mbox.lock); @@ -1339,6 +1338,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, mutex_unlock(&nic->mbox.lock); } + kfree_rcu(new_node, rcu); return rc; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 281b34af0bb48..3160a1a2d2ab3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1180,9 +1180,9 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, mlx4_unregister_mac(mdev->dev, priv->port, mac); hlist_del_rcu(&entry->hlist); - kfree_rcu(entry, rcu); en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", entry->mac, priv->port); + kfree_rcu(entry, rcu); ++removed; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 5bb4940da59d4..b51c006277598 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -289,6 +289,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent) return; } cond_resched(); + if (mlx5_cmd_is_down(dev)) { + ent->ret = -ENXIO; + return; + } } while (time_before(jiffies, poll_end)); ent->ret = -ETIMEDOUT; @@ -1066,7 +1070,7 @@ static void 
cmd_work_handler(struct work_struct *work) poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); - mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT)); + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e048a667e0758..f2952a6b0db73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -512,6 +512,12 @@ struct mlx5e_xdpsq { struct mlx5e_channel *channel; } ____cacheline_aligned_in_smp; +struct mlx5e_xdp_buff { + struct xdp_buff xdp; + struct mlx5_cqe64 *cqe; + struct mlx5e_rq *rq; +}; + struct mlx5e_ktls_resync_resp; struct mlx5e_icosq { @@ -710,6 +716,7 @@ struct mlx5e_rq { struct mlx5e_xdpsq *xdpsq; DECLARE_BITMAP(flags, 8); struct page_pool *page_pool; + struct mlx5e_xdp_buff mxbuf; /* AF_XDP zero-copy */ struct xsk_buff_pool *xsk_pool; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 58ec5e44aa7ad..3dac708c0d75a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -99,7 +99,7 @@ u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode) return sizeof(struct mlx5_ksm) * 4; } WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode); - return 0; + return 1; } u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h index 66d276a1be836..f4a19ffbb641c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -66,23 +66,11 @@ struct mlx5e_port_buffer { struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER]; }; -#ifdef CONFIG_MLX5_CORE_EN_DCB int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 change, unsigned int mtu, struct ieee_pfc *pfc, u32 *buffer_size, u8 *prio2buffer); -#else -static inline int -mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, - u32 change, unsigned int mtu, - void *pfc, - u32 *buffer_size, - u8 *prio2buffer) -{ - return 0; -} -#endif int mlx5e_port_query_buffer(struct mlx5e_priv *priv, struct mlx5e_port_buffer *port_buffer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index e054db1e10f8a..75256cf978c86 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -45,12 +45,6 @@ (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \ sizeof(struct mlx5_wqe_inline_seg)) -struct mlx5e_xdp_buff { - struct xdp_buff xdp; - struct mlx5_cqe64 *cqe; - struct mlx5e_rq *rq; -}; - /* XDP packets can be transmitted in different ways. On completion, we need to * distinguish between them to clean up things in a proper way. 
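The mlx5e_xdp_buff definition is dropped from en/xdp.h because it moved into en.h (see the en.h hunk above), where struct mlx5e_rq now embeds one instance. The en_rx.c hunks later in this diff then swap a per-call stack object for the per-RQ buffer; side by side, the pattern is:

	/* before: a fresh buffer on the stack for every handled CQE */
	struct mlx5e_xdp_buff mxbuf;
	mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
			 cqe_bcnt, &mxbuf);

	/* after: the single buffer embedded in the RQ */
	struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
	mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
			 cqe_bcnt, mxbuf);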
*/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 9aff779c77c89..78e78b6f81467 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -337,6 +337,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv, struct mlx5e_priv *master_priv); void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event); +void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv); static inline struct mlx5_core_dev * mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry) @@ -382,6 +383,10 @@ static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *sl static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event) { } + +static inline void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv) +{ +} #endif #endif /* __MLX5E_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 59b9653f573c8..131eb9b4eba65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -2421,9 +2421,30 @@ void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv, void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event) { - if (!priv->ipsec) - return; /* IPsec not supported */ + if (!priv->ipsec || mlx5_devcom_comp_get_size(priv->devcom) < 2) + return; /* IPsec not supported or no peers */ mlx5_devcom_send_event(priv->devcom, event, event, priv); wait_for_completion(&priv->ipsec->comp); } + +void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv) +{ + struct mlx5_devcom_comp_dev *tmp = NULL; + struct mlx5e_priv *peer_priv; + + if (!priv->devcom) + return; + + if (!mlx5_devcom_for_each_peer_begin(priv->devcom)) + goto out; + + peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp); + if (peer_priv) + complete_all(&peer_priv->ipsec->comp); + + mlx5_devcom_for_each_peer_end(priv->devcom); +out: + mlx5_devcom_unregister_component(priv->devcom); + priv->devcom = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index de2327ffb0f78..7e04a17fa3b82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -47,7 +47,6 @@ #include "en.h" #include "en/dim.h" #include "en/txrx.h" -#include "en/port_buffer.h" #include "en_tc.h" #include "en_rep.h" #include "en_accel/ipsec.h" @@ -258,6 +257,7 @@ static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv) } mlx5_devcom_unregister_component(priv->devcom); + priv->devcom = NULL; } static int blocking_event(struct notifier_block *nb, unsigned long event, void *data) @@ -2918,11 +2918,9 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) struct mlx5e_params *params = &priv->channels.params; struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - u16 mtu, prev_mtu; + u16 mtu; int err; - mlx5e_query_mtu(mdev, params, &prev_mtu); - err = mlx5e_set_mtu(mdev, params, params->sw_mtu); if (err) return err; @@ -2932,18 +2930,6 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", __func__, mtu, params->sw_mtu); - if (mtu != prev_mtu && 
MLX5_BUFFER_SUPPORTED(mdev)) { - err = mlx5e_port_manual_buffer_config(priv, 0, mtu, - NULL, NULL, NULL); - if (err) { - netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n", - __func__, mtu, err, prev_mtu); - - mlx5e_set_mtu(mdev, params, prev_mtu); - return err; - } - } - params->sw_mtu = mtu; return 0; } @@ -5845,6 +5831,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) if (mlx5e_monitor_counter_supported(priv)) mlx5e_monitor_counter_cleanup(priv); + mlx5e_ipsec_disable_events(priv); mlx5e_disable_blocking_events(priv); if (priv->en_trap) { mlx5e_deactivate_trap(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 673043d9ed11a..59aa10f1a9d95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1691,17 +1691,17 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, prog = rcu_dereference(rq->xdp_prog); if (prog) { - struct mlx5e_xdp_buff mxbuf; + struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; net_prefetchw(va); /* xdp_frame data area */ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, - cqe_bcnt, &mxbuf); - if (mlx5e_xdp_handle(rq, prog, &mxbuf)) + cqe_bcnt, mxbuf); + if (mlx5e_xdp_handle(rq, prog, mxbuf)) return NULL; /* page/packet was consumed by XDP */ - rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start; - metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta; - cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data; + rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start; + metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta; + cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data; } frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize); @@ -1720,11 +1720,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi struct mlx5_cqe64 *cqe, u32 cqe_bcnt) { struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; struct mlx5e_wqe_frag_info *head_wi = wi; u16 rx_headroom = rq->buff.headroom; struct mlx5e_frag_page *frag_page; struct skb_shared_info *sinfo; - struct mlx5e_xdp_buff mxbuf; u32 frag_consumed_bytes; struct bpf_prog *prog; struct sk_buff *skb; @@ -1744,8 +1744,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi net_prefetch(va + rx_headroom); mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, - frag_consumed_bytes, &mxbuf); - sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp); + frag_consumed_bytes, mxbuf); + sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp); truesize = 0; cqe_bcnt -= frag_consumed_bytes; @@ -1757,8 +1757,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt); - mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, - wi->offset, frag_consumed_bytes); + mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp, + frag_page, wi->offset, + frag_consumed_bytes); truesize += frag_info->frag_stride; cqe_bcnt -= frag_consumed_bytes; @@ -1767,31 +1768,46 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi } prog = rcu_dereference(rq->xdp_prog); - if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) { - if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { - struct mlx5e_wqe_frag_info *pwi; + if (prog) 
{ + u8 nr_frags_free, old_nr_frags = sinfo->nr_frags; + + if (mlx5e_xdp_handle(rq, prog, mxbuf)) { + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, + rq->flags)) { + struct mlx5e_wqe_frag_info *pwi; + + wi -= old_nr_frags - sinfo->nr_frags; + + for (pwi = head_wi; pwi < wi; pwi++) + pwi->frag_page->frags++; + } + return NULL; /* page/packet was consumed by XDP */ + } - for (pwi = head_wi; pwi < wi; pwi++) - pwi->frag_page->frags++; + nr_frags_free = old_nr_frags - sinfo->nr_frags; + if (unlikely(nr_frags_free)) { + wi -= nr_frags_free; + truesize -= nr_frags_free * frag_info->frag_stride; } - return NULL; /* page/packet was consumed by XDP */ } - skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz, - mxbuf.xdp.data - mxbuf.xdp.data_hard_start, - mxbuf.xdp.data_end - mxbuf.xdp.data, - mxbuf.xdp.data - mxbuf.xdp.data_meta); + skb = mlx5e_build_linear_skb( + rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz, + mxbuf->xdp.data - mxbuf->xdp.data_hard_start, + mxbuf->xdp.data_end - mxbuf->xdp.data, + mxbuf->xdp.data - mxbuf->xdp.data_meta); if (unlikely(!skb)) return NULL; skb_mark_for_recycle(skb); head_wi->frag_page->frags++; - if (xdp_buff_has_frags(&mxbuf.xdp)) { + if (xdp_buff_has_frags(&mxbuf->xdp)) { /* sinfo->nr_frags is reset by build_skb, calculate again. */ xdp_update_skb_shared_info(skb, wi - head_wi - 1, sinfo->xdp_frags_size, truesize, - xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp)); + xdp_buff_is_frag_pfmemalloc( + &mxbuf->xdp)); for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++) pwi->frag_page->frags++; @@ -1991,11 +2007,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx]; u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt); struct mlx5e_frag_page *head_page = frag_page; + struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; u32 frag_offset = head_offset; u32 byte_cnt = cqe_bcnt; struct skb_shared_info *sinfo; - struct mlx5e_xdp_buff mxbuf; unsigned int truesize = 0; + u32 pg_consumed_bytes; struct bpf_prog *prog; struct sk_buff *skb; u32 linear_frame_sz; @@ -2040,20 +2057,23 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w } } - mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf); + mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, + linear_data_len, mxbuf); - sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp); + sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp); while (byte_cnt) { /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. 
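To make the arithmetic in the mlx5e_skb_from_cqe_nonlinear() hunk above concrete: suppose the fragment loop queued four fragments (old_nr_frags = 4) and the XDP program then shrank the packet with bpf_xdp_adjust_tail() far enough that sinfo->nr_frags dropped to 2. Then nr_frags_free = 2, so wi rewinds by two entries, since those buffers were already released when the tail shrank, and truesize gives back 2 * frag_info->frag_stride. The consumed (XDP_TX/redirect) branch applies the same rewind before its pwi loop, so only pages the stack still references get their frags count bumped.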
*/ - u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); + pg_consumed_bytes = + min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) truesize += pg_consumed_bytes; else truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); - mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset, + mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp, + frag_page, frag_offset, pg_consumed_bytes); byte_cnt -= pg_consumed_bytes; frag_offset = 0; @@ -2061,10 +2081,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w } if (prog) { - if (mlx5e_xdp_handle(rq, prog, &mxbuf)) { + u8 nr_frags_free, old_nr_frags = sinfo->nr_frags; + u32 len; + + if (mlx5e_xdp_handle(rq, prog, mxbuf)) { if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { struct mlx5e_frag_page *pfp; + frag_page -= old_nr_frags - sinfo->nr_frags; + for (pfp = head_page; pfp < frag_page; pfp++) pfp->frags++; @@ -2074,10 +2099,20 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w return NULL; /* page/packet was consumed by XDP */ } - skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, - linear_frame_sz, - mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0, - mxbuf.xdp.data - mxbuf.xdp.data_meta); + nr_frags_free = old_nr_frags - sinfo->nr_frags; + if (unlikely(nr_frags_free)) { + frag_page -= nr_frags_free; + truesize -= (nr_frags_free - 1) * PAGE_SIZE + + ALIGN(pg_consumed_bytes, + BIT(rq->mpwqe.log_stride_sz)); + } + + len = mxbuf->xdp.data_end - mxbuf->xdp.data; + + skb = mlx5e_build_linear_skb( + rq, mxbuf->xdp.data_hard_start, linear_frame_sz, + mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len, + mxbuf->xdp.data - mxbuf->xdp.data_meta); if (unlikely(!skb)) { mlx5e_page_release_fragmented(rq, &wi->linear_page); return NULL; @@ -2087,29 +2122,34 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w wi->linear_page.frags++; mlx5e_page_release_fragmented(rq, &wi->linear_page); - if (xdp_buff_has_frags(&mxbuf.xdp)) { + if (xdp_buff_has_frags(&mxbuf->xdp)) { struct mlx5e_frag_page *pagep; /* sinfo->nr_frags is reset by build_skb, calculate again. 
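The multi-packet WQE path above does the same rewind with one twist: only the final fragment can be partially filled, so in the non-SHAMPO case the giveback is (nr_frags_free - 1) full pages plus the stride-aligned remainder. For example, with 4 KiB pages, a 2048-byte stride, nr_frags_free = 3 and a final pg_consumed_bytes of 1000, the correction is 2 * 4096 + ALIGN(1000, 2048) = 10240 bytes, mirroring the ALIGN accounting the loop used; this is also why pg_consumed_bytes was hoisted out of the loop, so its last value stays visible to the correction.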
*/ xdp_update_skb_shared_info(skb, frag_page - head_page, sinfo->xdp_frags_size, truesize, - xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp)); + xdp_buff_is_frag_pfmemalloc( + &mxbuf->xdp)); pagep = head_page; do pagep->frags++; while (++pagep < frag_page); + + headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len, + skb->data_len); + __pskb_pull_tail(skb, headlen); } - __pskb_pull_tail(skb, headlen); } else { dma_addr_t addr; - if (xdp_buff_has_frags(&mxbuf.xdp)) { + if (xdp_buff_has_frags(&mxbuf->xdp)) { struct mlx5e_frag_page *pagep; xdp_update_skb_shared_info(skb, sinfo->nr_frags, sinfo->xdp_frags_size, truesize, - xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp)); + xdp_buff_is_frag_pfmemalloc( + &mxbuf->xdp)); pagep = frag_page - sinfo->nr_frags; do @@ -2159,20 +2199,20 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, prog = rcu_dereference(rq->xdp_prog); if (prog) { - struct mlx5e_xdp_buff mxbuf; + struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; net_prefetchw(va); /* xdp_frame data area */ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, - cqe_bcnt, &mxbuf); - if (mlx5e_xdp_handle(rq, prog, &mxbuf)) { + cqe_bcnt, mxbuf); + if (mlx5e_xdp_handle(rq, prog, mxbuf)) { if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) frag_page->frags++; return NULL; /* page/packet was consumed by XDP */ } - rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start; - metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta; - cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data; + rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start; + metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta; + cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data; } frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 516df7f1997eb..35d2fe08c0fb5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -27,6 +27,7 @@ struct mlx5_fw_reset { struct work_struct reset_reload_work; struct work_struct reset_now_work; struct work_struct reset_abort_work; + struct delayed_work reset_timeout_work; unsigned long reset_flags; u8 reset_method; struct timer_list timer; @@ -258,6 +259,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool return -EALREADY; } + if (current_work() != &fw_reset->reset_timeout_work.work) + cancel_delayed_work(&fw_reset->reset_timeout_work); mlx5_stop_sync_reset_poll(dev); if (poll_health) mlx5_start_health_poll(dev); @@ -328,6 +331,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev) } mlx5_stop_health_poll(dev, true); mlx5_start_sync_reset_poll(dev); + + if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, + &fw_reset->reset_flags)) + schedule_delayed_work(&fw_reset->reset_timeout_work, + msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE))); return 0; } @@ -728,6 +736,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct } } +static void mlx5_sync_reset_timeout_work(struct work_struct *work) +{ + struct delayed_work *dwork = container_of(work, struct delayed_work, + work); + struct mlx5_fw_reset *fw_reset = + container_of(dwork, struct mlx5_fw_reset, reset_timeout_work); + struct mlx5_core_dev *dev = fw_reset->dev; + + if (mlx5_sync_reset_clear_reset_requested(dev, true)) + return; + mlx5_core_warn(dev, "PCI Sync FW Update Reset 
Timeout.\n"); +} + static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb); @@ -811,6 +832,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev) cancel_work_sync(&fw_reset->reset_reload_work); cancel_work_sync(&fw_reset->reset_now_work); cancel_work_sync(&fw_reset->reset_abort_work); + cancel_delayed_work(&fw_reset->reset_timeout_work); } static const struct devlink_param mlx5_fw_reset_devlink_params[] = { @@ -854,6 +876,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev) INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work); INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event); INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event); + INIT_DELAYED_WORK(&fw_reset->reset_timeout_work, + mlx5_sync_reset_timeout_work); init_completion(&fw_reset->done); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 9bc9bd83c2324..cd68c4b2c0bf9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, u32 func_id; u32 npages; u32 i = 0; + int err; - if (!mlx5_cmd_is_down(dev)) - return mlx5_cmd_do(dev, in, in_size, out, out_size); + err = mlx5_cmd_do(dev, in, in_size, out, out_size); + /* If FW is gone (-ENXIO), proceed to forceful reclaim */ + if (err != -ENXIO) + return err; /* No hard feelings, we want our pages back! */ npages = MLX5_GET(manage_pages_in, in, input_num_entries); diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c index c018783757fb2..858d4cbc83d3f 100644 --- a/drivers/net/ethernet/mscc/ocelot_stats.c +++ b/drivers/net/ethernet/mscc/ocelot_stats.c @@ -984,6 +984,6 @@ int ocelot_stats_init(struct ocelot *ocelot) void ocelot_stats_deinit(struct ocelot *ocelot) { - cancel_delayed_work(&ocelot->stats_work); + disable_delayed_work_sync(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index fbca8d0efd858..37a46596268a0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1789,7 +1789,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev) struct nfp_net *nn = netdev_priv(netdev); if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)) - return -EOPNOTSUPP; + return 0; return nfp_net_rss_key_sz(nn); } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 7b82779e4cd5d..80b5262d0d572 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5060,8 +5060,9 @@ static int rtl8169_resume(struct device *device) if (!device_may_wakeup(tp_to_dev(tp))) clk_prepare_enable(tp->clk); - /* Reportedly at least Asus X453MA truncates packets otherwise */ - if (tp->mac_version == RTL_GIGA_MAC_VER_37) + /* Some chip versions may truncate packets without this initialization */ + if (tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_46) rtl_init_rxcfg(tp); return rtl8169_runtime_resume(device); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index cc4f0d16c7630..641ad4054df45 100644 --- 
a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2203,15 +2203,35 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); } - /* Descriptor type must be set after all the above writes */ - dma_wmb(); + if (num_tx_desc > 1) { desc->die_dt = DT_FEND; desc--; + /* When using multi-descriptors, DT_FEND needs to get written + * before DT_FSTART, but the compiler may reorder the memory + * writes in an attempt to optimize the code. + * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART + * are written exactly in the order shown in the code. + * This is particularly important for cases where the DMA engine + * is already running when we are running this code. If the DMA + * sees DT_FSTART without the corresponding DT_FEND it will enter + * an error condition. + */ + dma_wmb(); desc->die_dt = DT_FSTART; } else { + /* Descriptor type must be set after all the above writes */ + dma_wmb(); desc->die_dt = DT_FSINGLE; } + + /* Before ringing the doorbell we need to make sure that the latest + * writes have been committed to memory, otherwise it could delay + * things until the doorbell is rung again. + * This is in place of the read operation mentioned in the HW + * manuals. + */ + dma_wmb(); ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); priv->cur_tx[q] += num_tx_desc; diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index 7f7d560cb2b4c..14dcca4ffb335 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -450,8 +450,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data) net_dev->hw_enc_features |= efx->type->offload_features; net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_ALL_TSO; - netif_set_tso_max_segs(net_dev, - ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT); + nic_data = efx->nic_data; + netif_set_tso_max_size(efx->net_dev, nic_data->tso_max_payload_len); + netif_set_tso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs); efx->mdio.dev = net_dev; rc = efx_ef100_init_datapath_caps(efx); @@ -478,7 +479,6 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data) /* Don't fail init if RSS setup doesn't work. */ efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels); - nic_data = efx->nic_data; rc = ef100_get_mac_address(efx, net_dev->perm_addr, CLIENT_HANDLE_SELF, efx->type->is_vf); if (rc) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 6da06931187d6..5b1bdcac81d99 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -887,8 +887,7 @@ static int ef100_process_design_param(struct efx_nic *efx, case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS: /* We always put HDR_NUM_SEGS=1 in our TSO descriptors */ if (!reader->value) { - netif_err(efx, probe, efx->net_dev, - "TSO_MAX_HDR_NUM_SEGS < 1\n"); + pci_err(efx->pci_dev, "TSO_MAX_HDR_NUM_SEGS < 1\n"); return -EOPNOTSUPP; } return 0; @@ -901,32 +900,28 @@ static int ef100_process_design_param(struct efx_nic *efx, */ if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE || EFX_MIN_DMAQ_SIZE % (u32)reader->value) { - netif_err(efx, probe, efx->net_dev, - "%s size granularity is %llu, can't guarantee safety\n", - reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? 
"RXQ" : "TXQ", - reader->value); + pci_err(efx->pci_dev, + "%s size granularity is %llu, can't guarantee safety\n", + reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ", + reader->value); return -EOPNOTSUPP; } return 0; case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN: nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_LEGACY_MAX_SIZE); - netif_set_tso_max_size(efx->net_dev, - nic_data->tso_max_payload_len); return 0; case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS: nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff); - netif_set_tso_max_segs(efx->net_dev, - nic_data->tso_max_payload_num_segs); return 0; case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES: nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff); return 0; case ESE_EF100_DP_GZ_COMPAT: if (reader->value) { - netif_err(efx, probe, efx->net_dev, - "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n", - reader->value); + pci_err(efx->pci_dev, + "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n", + reader->value); return -EOPNOTSUPP; } return 0; @@ -946,10 +941,10 @@ static int ef100_process_design_param(struct efx_nic *efx, * So the value of this shouldn't matter. */ if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT) - netif_dbg(efx, probe, efx->net_dev, - "NIC has other than default VI_STRIDES (mask " - "%#llx), early probing might use wrong one\n", - reader->value); + pci_dbg(efx->pci_dev, + "NIC has other than default VI_STRIDES (mask " + "%#llx), early probing might use wrong one\n", + reader->value); return 0; case ESE_EF100_DP_GZ_RX_MAX_RUNT: /* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't @@ -961,9 +956,9 @@ static int ef100_process_design_param(struct efx_nic *efx, /* Host interface says "Drivers should ignore design parameters * that they do not recognise." 
*/ - netif_dbg(efx, probe, efx->net_dev, - "Ignoring unrecognised design parameter %u\n", - reader->type); + pci_dbg(efx->pci_dev, + "Ignoring unrecognised design parameter %u\n", + reader->type); return 0; } } @@ -999,13 +994,13 @@ static int ef100_check_design_params(struct efx_nic *efx) */ if (reader.state != EF100_TLV_TYPE) { if (reader.state == EF100_TLV_TYPE_CONT) - netif_err(efx, probe, efx->net_dev, - "truncated design parameter (incomplete type %u)\n", - reader.type); + pci_err(efx->pci_dev, + "truncated design parameter (incomplete type %u)\n", + reader.type); else - netif_err(efx, probe, efx->net_dev, - "truncated design parameter %u\n", - reader.type); + pci_err(efx->pci_dev, + "truncated design parameter %u\n", + reader.type); rc = -EIO; } out: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 8f90eae937741..d152afa48d5c2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1721,14 +1721,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) } } else { if (bsp_priv->clk_enabled) { + if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) { + bsp_priv->ops->set_clock_selection(bsp_priv, + bsp_priv->clock_input, false); + } + clk_bulk_disable_unprepare(bsp_priv->num_clks, bsp_priv->clks); clk_disable_unprepare(bsp_priv->clk_phy); - if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) - bsp_priv->ops->set_clock_selection(bsp_priv, - bsp_priv->clock_input, false); - bsp_priv->clk_enabled = false; } } diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c index 59d6ab989c554..8ffbfaa3ab18c 100644 --- a/drivers/net/ethernet/ti/am65-cpts.c +++ b/drivers/net/ethernet/ti/am65-cpts.c @@ -163,7 +163,9 @@ struct am65_cpts { struct device_node *clk_mux_np; struct clk *refclk; u32 refclk_freq; - struct list_head events; + /* separate lists to handle TX and RX timestamp independently */ + struct list_head events_tx; + struct list_head events_rx; struct list_head pool; struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS]; spinlock_t lock; /* protects events lists*/ @@ -227,6 +229,24 @@ static void am65_cpts_disable(struct am65_cpts *cpts) am65_cpts_write32(cpts, 0, int_enable); } +static int am65_cpts_purge_event_list(struct am65_cpts *cpts, + struct list_head *events) +{ + struct list_head *this, *next; + struct am65_cpts_event *event; + int removed = 0; + + list_for_each_safe(this, next, events) { + event = list_entry(this, struct am65_cpts_event, list); + if (time_after(jiffies, event->tmo)) { + list_del_init(&event->list); + list_add(&event->list, &cpts->pool); + ++removed; + } + } + return removed; +} + static int am65_cpts_event_get_port(struct am65_cpts_event *event) { return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >> @@ -239,20 +259,12 @@ static int am65_cpts_event_get_type(struct am65_cpts_event *event) AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT; } -static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts) +static int am65_cpts_purge_events(struct am65_cpts *cpts) { - struct list_head *this, *next; - struct am65_cpts_event *event; int removed = 0; - list_for_each_safe(this, next, &cpts->events) { - event = list_entry(this, struct am65_cpts_event, list); - if (time_after(jiffies, event->tmo)) { - list_del_init(&event->list); - list_add(&event->list, &cpts->pool); - ++removed; - } - } + removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx); + removed += 
am65_cpts_purge_event_list(cpts, &cpts->events_rx); if (removed) dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed); @@ -287,7 +299,7 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts) struct am65_cpts_event, list); if (!event) { - if (am65_cpts_cpts_purge_events(cpts)) { + if (am65_cpts_purge_events(cpts)) { dev_err(cpts->dev, "cpts: event pool empty\n"); ret = -1; goto out; @@ -306,11 +318,21 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts) cpts->timestamp); break; case AM65_CPTS_EV_RX: + event->tmo = jiffies + + msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT); + + list_move_tail(&event->list, &cpts->events_rx); + + dev_dbg(cpts->dev, + "AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n", + event->event1, event->event2, + event->timestamp); + break; case AM65_CPTS_EV_TX: event->tmo = jiffies + msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT); - list_move_tail(&event->list, &cpts->events); + list_move_tail(&event->list, &cpts->events_tx); dev_dbg(cpts->dev, "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n", @@ -828,7 +850,7 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts, return found; } -static void am65_cpts_find_ts(struct am65_cpts *cpts) +static void am65_cpts_find_tx_ts(struct am65_cpts *cpts) { struct am65_cpts_event *event; struct list_head *this, *next; @@ -837,7 +859,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts) LIST_HEAD(events); spin_lock_irqsave(&cpts->lock, flags); - list_splice_init(&cpts->events, &events); + list_splice_init(&cpts->events_tx, &events); spin_unlock_irqrestore(&cpts->lock, flags); list_for_each_safe(this, next, &events) { @@ -850,7 +872,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts) } spin_lock_irqsave(&cpts->lock, flags); - list_splice_tail(&events, &cpts->events); + list_splice_tail(&events, &cpts->events_tx); list_splice_tail(&events_free, &cpts->pool); spin_unlock_irqrestore(&cpts->lock, flags); } @@ -861,7 +883,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp) unsigned long flags; long delay = -1; - am65_cpts_find_ts(cpts); + am65_cpts_find_tx_ts(cpts); spin_lock_irqsave(&cpts->txq.lock, flags); if (!skb_queue_empty(&cpts->txq)) @@ -905,7 +927,7 @@ static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid) spin_lock_irqsave(&cpts->lock, flags); __am65_cpts_fifo_read(cpts); - list_for_each_safe(this, next, &cpts->events) { + list_for_each_safe(this, next, &cpts->events_rx) { event = list_entry(this, struct am65_cpts_event, list); if (time_after(jiffies, event->tmo)) { list_move(&event->list, &cpts->pool); @@ -1155,7 +1177,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs, return ERR_PTR(ret); mutex_init(&cpts->ptp_clk_lock); - INIT_LIST_HEAD(&cpts->events); + INIT_LIST_HEAD(&cpts->events_tx); + INIT_LIST_HEAD(&cpts->events_rx); INIT_LIST_HEAD(&cpts->pool); spin_lock_init(&cpts->lock); skb_queue_head_init(&cpts->txq); diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index ee2a7b2f6268d..e2d92295ad33b 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -433,6 +433,7 @@ static void nsim_enable_napi(struct netdevsim *ns) static int nsim_open(struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + struct netdevsim *peer; int err; err = nsim_init_napi(ns); @@ -441,6 +442,12 @@ static int nsim_open(struct net_device *dev) nsim_enable_napi(ns); + peer = rtnl_dereference(ns->peer); + if (peer && netif_running(peer->netdev)) { + netif_carrier_on(dev); + netif_carrier_on(peer->netdev); + } 
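+ /* The peer's nsim_open() performs the same check, so the carrier + * comes up no matter which end of the pair is opened last. + */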
+ return 0; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 92e9eb4146d9b..f60cf630bdb3d 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -3870,6 +3870,8 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev) { struct lan8814_shared_priv *shared = phydev->shared->priv; + shared->phydev = phydev; + /* Initialise shared lock for clock*/ mutex_init(&shared->shared_lock); @@ -3921,8 +3923,6 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev) phydev_dbg(phydev, "successfully registered ptp clock\n"); - shared->phydev = phydev; - /* The EP.4 is shared between all the PHYs in the package and also it * can be accessed by any of the PHYs */ diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 7b33993f7001e..f1827a1bd7a59 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -360,6 +360,11 @@ static void sfp_fixup_ignore_tx_fault(struct sfp *sfp) sfp->state_ignore_mask |= SFP_F_TX_FAULT; } +static void sfp_fixup_ignore_hw(struct sfp *sfp, unsigned int mask) +{ + sfp->state_hw_mask &= ~mask; +} + static void sfp_fixup_nokia(struct sfp *sfp) { sfp_fixup_long_startup(sfp); @@ -408,7 +413,19 @@ static void sfp_fixup_halny_gsfp(struct sfp *sfp) * these are possibly used for other purposes on this * module, e.g. a serial port. */ - sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS); + sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS); +} + +static void sfp_fixup_potron(struct sfp *sfp) +{ + /* + * The TX_FAULT and LOS pins on this device are used for serial + * communication, so ignore them. Additionally, provide extra + * time for this device to fully start up. + */ + + sfp_fixup_long_startup(sfp); + sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS); } static void sfp_fixup_rollball_cc(struct sfp *sfp) @@ -474,6 +491,9 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex, sfp_fixup_nokia), + // FLYPRO SFP-10GT-CS-30M uses Rollball protocol to talk to the PHY. + SFP_QUIRK_F("FLYPRO", "SFP-10GT-CS-30M", sfp_fixup_rollball), + // Fiberstore SFP-10G-T doesn't identify as copper, uses the Rollball // protocol to talk to the PHY and needs 4 sec wait before probing the // PHY. @@ -511,6 +531,8 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt), SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt), + SFP_QUIRK_F("YV", "SFP+ONU-XGSPON", sfp_fixup_potron), + // OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault), diff --git a/drivers/net/tun.c b/drivers/net/tun.c index fae1a0ab36bdf..fb9d425eff8c1 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1932,6 +1932,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, local_bh_enable(); goto unlock_frags; } + + if (frags && skb != tfile->napi.skb) + tfile->napi.skb = skb; } rcu_read_unlock(); local_bh_enable(); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 792ddda1ad493..85bd5d845409b 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -625,6 +625,21 @@ static void ax88772_suspend(struct usbnet *dev) asix_read_medium_status(dev, 1)); } +/* Notes on PM callbacks and locking context: + * + * - asix_suspend()/asix_resume() are invoked for both runtime PM and + * system-wide suspend/resume. 
For struct usb_driver the ->resume() + * callback does not receive pm_message_t, so the resume type cannot + * be distinguished here. + * + * - The MAC driver must hold RTNL when calling phylink interfaces such as + * phylink_suspend()/resume(). Those calls will also perform MDIO I/O. + * + * - Taking RTNL and doing MDIO from a runtime-PM resume callback (while + * the USB PM lock is held) is fragile. Since autosuspend brings no + * measurable power saving here, we block it by holding a PM usage + * reference in ax88772_bind(). + */ static int asix_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); @@ -919,6 +934,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) if (ret) goto initphy_err; + /* Keep this interface runtime-PM active by taking a usage ref. + * Prevents runtime suspend while bound and avoids resume paths + * that could deadlock (autoresume under RTNL while USB PM lock + * is held, phylink/MDIO wants RTNL). + */ + pm_runtime_get_noresume(&intf->dev); + return 0; initphy_err: @@ -948,6 +970,8 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) phylink_destroy(priv->phylink); ax88772_mdio_unregister(priv); asix_rx_fixup_common_free(dev->driver_priv); + /* Drop the PM usage ref taken in bind() */ + pm_runtime_put(&intf->dev); } static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -1600,6 +1624,11 @@ static struct usb_driver asix_driver = { .resume = asix_resume, .reset_resume = asix_resume, .disconnect = usbnet_disconnect, + /* usbnet enables autosuspend by default (supports_autosuspend=1). + * We keep runtime-PM active for AX88772* by taking a PM usage + * reference in ax88772_bind() (pm_runtime_get_noresume()) and + * dropping it in unbind(), which effectively blocks autosuspend. 
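+ * Note that pm_runtime_get_noresume() only raises the usage + * counter and never wakes the device, so taking the reference in + * bind() cannot itself trigger a resume.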
+ */ .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 2f8637224b69e..2da67814556f9 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1920,13 +1920,19 @@ static const struct ethtool_ops lan78xx_ethtool_ops = { .get_regs = lan78xx_get_regs, }; -static void lan78xx_init_mac_address(struct lan78xx_net *dev) +static int lan78xx_init_mac_address(struct lan78xx_net *dev) { u32 addr_lo, addr_hi; u8 addr[6]; + int ret; + + ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); + if (ret < 0) + return ret; - lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); - lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); + ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); + if (ret < 0) + return ret; addr[0] = addr_lo & 0xFF; addr[1] = (addr_lo >> 8) & 0xFF; @@ -1959,14 +1965,26 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) (addr[2] << 16) | (addr[3] << 24); addr_hi = addr[4] | (addr[5] << 8); - lan78xx_write_reg(dev, RX_ADDRL, addr_lo); - lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + if (ret < 0) + return ret; } - lan78xx_write_reg(dev, MAF_LO(0), addr_lo); - lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + if (ret < 0) + return ret; eth_hw_addr_set(dev->net, addr); + + return 0; } /* MDIO read and write wrappers for phylib */ @@ -2905,8 +2923,6 @@ static int lan78xx_reset(struct lan78xx_net *dev) } } while (buf & HW_CFG_LRST_); - lan78xx_init_mac_address(dev); - /* save DEVID for later usage */ ret = lan78xx_read_reg(dev, ID_REV, &buf); if (ret < 0) @@ -2915,6 +2931,10 @@ static int lan78xx_reset(struct lan78xx_net *dev) dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16; dev->chiprev = buf & ID_REV_CHIP_REV_MASK_; + ret = lan78xx_init_mac_address(dev); + if (ret < 0) + return ret; + /* Respond to the IN token with a NAK */ ret = lan78xx_read_reg(dev, USB_CFG0, &buf); if (ret < 0) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2cab046749a92..3fcd2b736c5e3 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -10151,7 +10151,12 @@ static int __init rtl8152_driver_init(void) ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE); if (ret) return ret; - return usb_register(&rtl8152_driver); + + ret = usb_register(&rtl8152_driver); + if (ret) + usb_deregister_device_driver(&rtl8152_cfgselector_driver); + + return ret; } static void __exit rtl8152_driver_exit(void) diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index ddff6f19ff98e..278e6cb6f4d99 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -664,7 +664,6 @@ static void rtl8150_set_multicast(struct net_device *netdev) rtl8150_t *dev = netdev_priv(netdev); u16 rx_creg = 0x9e; - netif_stop_queue(netdev); if (netdev->flags & IFF_PROMISC) { rx_creg |= 0x0001; dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name); @@ -678,7 +677,6 @@ static void rtl8150_set_multicast(struct net_device *netdev) rx_creg &= 0x00fc; } async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg); - netif_wake_queue(netdev); } static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, @@ -687,9 +685,16 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, rtl8150_t *dev = 
netdev_priv(netdev); int count, res; + /* pad the frame and ensure terminating USB packet, datasheet 9.2.3 */ + count = max(skb->len, ETH_ZLEN); + if (count % 64 == 0) + count++; + if (skb_padto(skb, count)) { + netdev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + netif_stop_queue(netdev); - count = (skb->len < 60) ? 60 : skb->len; - count = (count & 0x3f) ? count : count + 1; dev->tx_skb = skb; usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), skb->data, count, write_bulk_callback, dev); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 09066e6aca402..fdab67a56e438 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1764,33 +1764,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch, int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) { + unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ; unsigned long time_left, i; - time_left = wait_for_completion_timeout(&ar->wmi.service_ready, - WMI_SERVICE_READY_TIMEOUT_HZ); - if (!time_left) { - /* Sometimes the PCI HIF doesn't receive interrupt - * for the service ready message even if the buffer - * was completed. PCIe sniffer shows that it's - * because the corresponding CE ring doesn't fires - * it. Workaround here by polling CE rings once. - */ - ath10k_warn(ar, "failed to receive service ready completion, polling..\n"); - + /* Sometimes the PCI HIF doesn't receive an interrupt + * for the service ready message even if the buffer + * was completed. PCIe sniffer shows that it's + * because the corresponding CE ring doesn't fire + * it. Work around this by polling CE rings. Since + * the message could arrive at any time, continue + * polling until timeout. 
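+ * The do/while loop below keeps the original overall deadline of + * WMI_SERVICE_READY_TIMEOUT_HZ while sleeping in 100 ms slices + * between poll passes.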
+ */ + do { for (i = 0; i < CE_COUNT; i++) ath10k_hif_send_complete_check(ar, i, 1); + /* The 100 ms granularity is a tradeoff considering scheduler + * overhead and response latency + */ time_left = wait_for_completion_timeout(&ar->wmi.service_ready, - WMI_SERVICE_READY_TIMEOUT_HZ); - if (!time_left) { - ath10k_warn(ar, "polling timed out\n"); - return -ETIMEDOUT; - } - - ath10k_warn(ar, "service ready completion received, continuing normally\n"); - } + msecs_to_jiffies(100)); + if (time_left) + return 0; + } while (time_before(jiffies, timeout)); - return 0; + ath10k_warn(ar, "failed to receive service ready completion\n"); + return -ETIMEDOUT; } int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index bb46ef986b2a4..afac4a1e9a1db 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1936,14 +1936,10 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab) mutex_unlock(&ab->core_lock); ath11k_dp_free(ab); - ath11k_hal_srng_deinit(ab); + ath11k_hal_srng_clear(ab); ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1; - ret = ath11k_hal_srng_init(ab); - if (ret) - return ret; - clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); ret = ath11k_core_qmi_firmware_ready(ab); diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 65e52ab742b41..e47774b6d9926 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -1383,6 +1383,22 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab) } EXPORT_SYMBOL(ath11k_hal_srng_deinit); +void ath11k_hal_srng_clear(struct ath11k_base *ab) +{ + /* No need to memset rdp and wrp memory since each individual + * segment would get cleared in ath11k_hal_srng_src_hw_init() + * and ath11k_hal_srng_dst_hw_init(). 
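+ * Unlike ath11k_hal_srng_deinit(), this helper keeps the DMA + * allocations alive and only resets the software state, so the + * rings can be reused after firmware recovery.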
+ */ + memset(ab->hal.srng_list, 0, + sizeof(ab->hal.srng_list)); + memset(ab->hal.shadow_reg_addr, 0, + sizeof(ab->hal.shadow_reg_addr)); + ab->hal.avail_blk_resource = 0; + ab->hal.current_blk_index = 0; + ab->hal.num_shadow_reg_configured = 0; +} +EXPORT_SYMBOL(ath11k_hal_srng_clear); + void ath11k_hal_dump_srng_stats(struct ath11k_base *ab) { struct hal_srng *srng; diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index dc8bbe0730170..0f725668b819d 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -965,6 +965,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, struct hal_srng_params *params); int ath11k_hal_srng_init(struct ath11k_base *ath11k); void ath11k_hal_srng_deinit(struct ath11k_base *ath11k); +void ath11k_hal_srng_clear(struct ath11k_base *ab); void ath11k_hal_dump_srng_stats(struct ath11k_base *ab); void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab, u32 **cfg, u32 *len); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index a5555c959dec9..3ffb7723b6731 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -2550,7 +2550,7 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab) GFP_KERNEL); if (!m3_mem->vaddr) { ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n", - fw->size); + m3_len); ret = -ENOMEM; goto out; } diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c index b66d23d6b2bd9..bd21e8fe9c90b 100644 --- a/drivers/net/wireless/ath/ath12k/ce.c +++ b/drivers/net/wireless/ath/ath12k/ce.c @@ -388,7 +388,7 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe) } while ((skb = __skb_dequeue(&list))) { - ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n", + ath12k_dbg(ab, ATH12K_DBG_CE, "rx ce pipe %d len %d\n", pipe->pipe_num, skb->len); pipe->recv_cb(ab, skb); } diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h index f7005917362c6..ea711e02ca03c 100644 --- a/drivers/net/wireless/ath/ath12k/debug.h +++ b/drivers/net/wireless/ath/ath12k/debug.h @@ -26,6 +26,7 @@ enum ath12k_debug_mask { ATH12K_DBG_DP_TX = 0x00002000, ATH12K_DBG_DP_RX = 0x00004000, ATH12K_DBG_WOW = 0x00008000, + ATH12K_DBG_CE = 0x00010000, ATH12K_DBG_ANY = 0xffffffff, }; diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 4b3fbec397ac0..c15eecf2a1882 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -6733,15 +6733,15 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); - if (arvif->is_created) - goto flush; - if (vif->type == NL80211_IFTYPE_AP && ar->num_peers > (ar->max_num_peers - 1)) { ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); goto unlock; } + if (arvif->is_created) + goto flush; + if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", TARGET_NUM_VDEVS); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h index 81787501d4a4f..11704163876b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h @@ -12,7 +12,6 @@ #include "fw/api/phy.h" #include "fw/api/config.h" #include "fw/api/nvm-reg.h" 
-#include "fw/img.h" #include "iwl-trans.h" #define BIOS_SAR_MAX_PROFILE_NUM 4 diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 59bea82eab294..8801b93eacd42 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -684,10 +684,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy, return; } - /* Don't send world or same regdom info to firmware */ - if (strncmp(request->alpha2, "00", 2) && - strncmp(request->alpha2, adapter->country_code, - sizeof(request->alpha2))) { + /* Don't send same regdom info to firmware */ + if (strncmp(request->alpha2, adapter->country_code, + sizeof(request->alpha2)) != 0) { memcpy(adapter->country_code, request->alpha2, sizeof(request->alpha2)); mwifiex_send_domain_info_cmd_fw(wiphy); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c index ec02148a7f1f7..2ee8a6e1e310e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c @@ -48,7 +48,7 @@ mt76_wmac_probe(struct platform_device *pdev) return 0; error: - ieee80211_free_hw(mt76_hw(dev)); + mt76_free_device(mdev); return ret; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h index 509fb43d8a688..2908e8113e48a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h @@ -50,9 +50,9 @@ enum mt7915_eeprom_field { #define MT_EE_CAL_GROUP_SIZE_7975 (54 * MT_EE_CAL_UNIT + 16) #define MT_EE_CAL_GROUP_SIZE_7976 (94 * MT_EE_CAL_UNIT + 16) #define MT_EE_CAL_GROUP_SIZE_7916_6G (94 * MT_EE_CAL_UNIT + 16) +#define MT_EE_CAL_GROUP_SIZE_7981 (144 * MT_EE_CAL_UNIT + 16) #define MT_EE_CAL_DPD_SIZE_V1 (54 * MT_EE_CAL_UNIT) #define MT_EE_CAL_DPD_SIZE_V2 (300 * MT_EE_CAL_UNIT) -#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */ #define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0) #define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6) @@ -179,6 +179,8 @@ mt7915_get_cal_group_size(struct mt7915_dev *dev) val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val); return (val == MT_EE_V2_BAND_SEL_6GHZ) ? 
MT_EE_CAL_GROUP_SIZE_7916_6G : MT_EE_CAL_GROUP_SIZE_7916; + } else if (is_mt7981(&dev->mt76)) { + return MT_EE_CAL_GROUP_SIZE_7981; } else if (mt7915_check_adie(dev, false)) { return MT_EE_CAL_GROUP_SIZE_7976; } else { @@ -191,8 +193,6 @@ mt7915_get_cal_dpd_size(struct mt7915_dev *dev) { if (is_mt7915(&dev->mt76)) return MT_EE_CAL_DPD_SIZE_V1; - else if (is_mt7981(&dev->mt76)) - return MT_EE_CAL_DPD_SIZE_V2_7981; else return MT_EE_CAL_DPD_SIZE_V2; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 3398c25cb03c0..7b481aea76b6c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -3004,30 +3004,15 @@ static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw) /* 5G BW160 */ 5250, 5570, 5815 }; - static const u16 freq_list_v2_7981[] = { - /* 5G BW20 */ - 5180, 5200, 5220, 5240, - 5260, 5280, 5300, 5320, - 5500, 5520, 5540, 5560, - 5580, 5600, 5620, 5640, - 5660, 5680, 5700, 5720, - 5745, 5765, 5785, 5805, - 5825, 5845, 5865, 5885, - /* 5G BW160 */ - 5250, 5570, 5815 - }; - const u16 *freq_list = freq_list_v1; - int n_freqs = ARRAY_SIZE(freq_list_v1); - int idx; + const u16 *freq_list; + int idx, n_freqs; if (!is_mt7915(&dev->mt76)) { - if (is_mt7981(&dev->mt76)) { - freq_list = freq_list_v2_7981; - n_freqs = ARRAY_SIZE(freq_list_v2_7981); - } else { - freq_list = freq_list_v2; - n_freqs = ARRAY_SIZE(freq_list_v2); - } + freq_list = freq_list_v2; + n_freqs = ARRAY_SIZE(freq_list_v2); + } else { + freq_list = freq_list_v1; + n_freqs = ARRAY_SIZE(freq_list_v1); } if (freq < 4000) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c index e3459295ad884..5be0edb2fe9aa 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c @@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = { /* Netgear, Inc. [A8000,AXE3000] */ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM }, + /* Netgear, Inc. A7500 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9065, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM }, /* TP-Link TXE50UH */ { USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM }, diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c index 682db1bab21c6..a63ff630eaba9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c @@ -12,6 +12,9 @@ static const struct usb_device_id mt7925u_device_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7925, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM }, + /* Netgear, Inc. 
A9000 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9072, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM }, { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c index c550385541143..91b7d35bdb431 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c @@ -679,6 +679,7 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev) static int mt7996_wed_rro_init(struct mt7996_dev *dev) { #ifdef CONFIG_NET_MEDIATEK_SOC_WED + u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff); struct mtk_wed_device *wed = &dev->mt76.mmio.wed; u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0; struct mt7996_wed_rro_addr *addr; @@ -718,7 +719,7 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev) addr = dev->wed_rro.addr_elem[i].ptr; for (j = 0; j < MT7996_RRO_WINDOW_MAX_SIZE; j++) { - addr->signature = 0xff; + addr->data = cpu_to_le32(val); addr++; } @@ -736,7 +737,7 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev) dev->wed_rro.session.ptr = ptr; addr = dev->wed_rro.session.ptr; for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) { - addr->signature = 0xff; + addr->data = cpu_to_le32(val); addr++; } @@ -836,6 +837,7 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev) static void mt7996_wed_rro_work(struct work_struct *work) { #ifdef CONFIG_NET_MEDIATEK_SOC_WED + u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff); struct mt7996_dev *dev; LIST_HEAD(list); @@ -872,7 +874,7 @@ static void mt7996_wed_rro_work(struct work_struct *work) MT7996_RRO_WINDOW_MAX_LEN; reset: elem = ptr + elem_id * sizeof(*elem); - elem->signature = 0xff; + elem->data |= cpu_to_le32(val); } mt7996_mcu_wed_rro_reset_sessions(dev, e->id); out: diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index 425fd030bee00..29e7289c3a169 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -194,13 +194,12 @@ struct mt7996_hif { int irq; }; +#define WED_RRO_ADDR_SIGNATURE_MASK GENMASK(31, 24) +#define WED_RRO_ADDR_COUNT_MASK GENMASK(14, 4) +#define WED_RRO_ADDR_HEAD_HIGH_MASK GENMASK(3, 0) struct mt7996_wed_rro_addr { - u32 head_low; - u32 head_high : 4; - u32 count: 11; - u32 oor: 1; - u32 rsv : 8; - u32 signature : 8; + __le32 head_low; + __le32 data; }; struct mt7996_wed_rro_session_id { diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c index 04056181368a6..dbd05612f2e4a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c @@ -132,6 +132,7 @@ static int mt7996_pci_probe(struct pci_dev *pdev, mdev = &dev->mt76; mt7996_wfsys_reset(dev); hif2 = mt7996_pci_init_hif2(pdev); + dev->hif2 = hif2; ret = mt7996_mmio_wed_init(dev, pdev, false, &irq); if (ret < 0) @@ -156,7 +157,6 @@ static int mt7996_pci_probe(struct pci_dev *pdev, if (hif2) { hif2_dev = container_of(hif2->dev, struct pci_dev, dev); - dev->hif2 = hif2; ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &hif2_irq); if (ret < 0) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c index c6f69d87c38d4..d07f0f75d23f2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c @@ -8170,8 +8170,6 @@ static const struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8192cu_fops}, 
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, -{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff), - .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff), diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index c9b9e2bc90cc4..1d75d8ec00166 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -291,7 +291,6 @@ static const struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ - {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/ diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index c336c66ac8e35..df8fad7a2aea6 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -960,6 +960,14 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev, } } +static void rtw89_tx_wait_work(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + tx_wait_work.work); + + rtw89_tx_wait_list_clear(rtwdev); +} + void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel) { u8 ch_dma; @@ -970,32 +978,28 @@ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel) } int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, - int qsel, unsigned int timeout) + struct rtw89_tx_wait_info *wait, int qsel, + unsigned int timeout) { - struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); - struct rtw89_tx_wait_info *wait; unsigned long time_left; int ret = 0; - wait = kzalloc(sizeof(*wait), GFP_KERNEL); - if (!wait) { - rtw89_core_tx_kick_off(rtwdev, qsel); - return 0; - } - - init_completion(&wait->completion); - rcu_assign_pointer(skb_data->wait, wait); + lockdep_assert_wiphy(rtwdev->hw->wiphy); rtw89_core_tx_kick_off(rtwdev, qsel); time_left = wait_for_completion_timeout(&wait->completion, msecs_to_jiffies(timeout)); - if (time_left == 0) - ret = -ETIMEDOUT; - else if (!wait->tx_done) - ret = -EAGAIN; - rcu_assign_pointer(skb_data->wait, NULL); - kfree_rcu(wait, rcu_head); + if (time_left == 0) { + ret = -ETIMEDOUT; + list_add_tail(&wait->list, &rtwdev->tx_waits); + wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->tx_wait_work, + RTW89_TX_WAIT_WORK_TIMEOUT); + } else { + if (!wait->tx_done) + ret = -EAGAIN; + rtw89_tx_wait_release(wait); + } return ret; } @@ -1042,10 +1046,12 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev, } int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel) + struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel, + struct rtw89_tx_wait_info *wait) { struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); + struct rtw89_tx_skb_data *skb_data = 
RTW89_TX_SKB_CB(skb); struct rtw89_core_tx_request tx_req = {0}; struct rtw89_sta_link *rtwsta_link = NULL; struct rtw89_vif_link *rtwvif_link; @@ -1078,6 +1084,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, rtw89_core_tx_update_desc_info(rtwdev, &tx_req); rtw89_core_tx_wake(rtwdev, &tx_req); + rcu_assign_pointer(skb_data->wait, wait); + ret = rtw89_hci_tx_write(rtwdev, &tx_req); if (ret) { rtw89_err(rtwdev, "failed to transmit skb to HCI\n"); @@ -2893,7 +2901,7 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev, goto out; } rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb); - ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL); + ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL, NULL); if (ret) { rtw89_err(rtwdev, "failed to push txq: %d\n", ret); ieee80211_free_txskb(rtwdev->hw, skb); @@ -3069,7 +3077,7 @@ static void rtw89_core_sta_pending_tx_iter(void *data, skb_queue_walk_safe(&rtwsta->roc_queue, skb, tmp) { skb_unlink(skb, &rtwsta->roc_queue); - ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel); + ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel, NULL); if (ret) { rtw89_warn(rtwdev, "pending tx failed with %d\n", ret); dev_kfree_skb_any(skb); @@ -3091,6 +3099,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, bool qos, bool ps) { struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); + struct rtw89_tx_wait_info *wait; struct ieee80211_sta *sta; struct ieee80211_hdr *hdr; struct sk_buff *skb; @@ -3099,6 +3108,12 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) return 0; + wait = kzalloc(sizeof(*wait), GFP_KERNEL); + if (!wait) + return -ENOMEM; + + init_completion(&wait->completion); + rcu_read_lock(); sta = ieee80211_find_sta(vif, vif->cfg.ap_addr); if (!sta) { @@ -3112,11 +3127,13 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, goto out; } + wait->skb = skb; + hdr = (struct ieee80211_hdr *)skb->data; if (ps) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); - ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel); + ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel, wait); if (ret) { rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret); dev_kfree_skb_any(skb); @@ -3125,10 +3142,11 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, rcu_read_unlock(); - return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel, + return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel, RTW89_ROC_TX_TIMEOUT); out: rcu_read_unlock(); + kfree(wait); return ret; } @@ -4419,6 +4437,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev) void rtw89_core_stop(struct rtw89_dev *rtwdev) { struct rtw89_btc *btc = &rtwdev->btc; + struct wiphy *wiphy = rtwdev->hw->wiphy; /* Prvent to stop twice; enter_ips and ops_stop */ if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) @@ -4437,6 +4456,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev) cancel_work_sync(&btc->dhcp_notify_work); cancel_work_sync(&btc->icmp_notify_work); cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work); + wiphy_delayed_work_cancel(wiphy, &rtwdev->tx_wait_work); cancel_delayed_work_sync(&rtwdev->track_work); cancel_delayed_work_sync(&rtwdev->chanctx_work); cancel_delayed_work_sync(&rtwdev->coex_act1_work); @@ -4657,6 +4677,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) continue; INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]); } + INIT_LIST_HEAD(&rtwdev->tx_waits); INIT_WORK(&rtwdev->ba_work, 
rtw89_core_ba_work); INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work); INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work); @@ -4666,6 +4687,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work); INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work); INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work); + wiphy_delayed_work_init(&rtwdev->tx_wait_work, rtw89_tx_wait_work); INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work); INIT_DELAYED_WORK(&rtwdev->antdiv_work, rtw89_phy_antdiv_work); rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0); diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 4f64ea392e6c9..7be32b83d4d64 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -3406,9 +3406,12 @@ struct rtw89_phy_rate_pattern { bool enable; }; +#define RTW89_TX_WAIT_WORK_TIMEOUT msecs_to_jiffies(500) struct rtw89_tx_wait_info { struct rcu_head rcu_head; + struct list_head list; struct completion completion; + struct sk_buff *skb; bool tx_done; }; @@ -5539,6 +5542,9 @@ struct rtw89_dev { /* used to protect rpwm */ spinlock_t rpwm_lock; + struct list_head tx_waits; + struct wiphy_delayed_work tx_wait_work; + struct rtw89_cam_info cam_info; struct sk_buff_head c2h_queue; @@ -5735,6 +5741,26 @@ u8 rtw89_sta_link_inst_get_index(struct rtw89_sta_link *rtwsta_link) return rtwsta_link - rtwsta->links_inst; } +static inline void rtw89_tx_wait_release(struct rtw89_tx_wait_info *wait) +{ + dev_kfree_skb_any(wait->skb); + kfree_rcu(wait, rcu_head); +} + +static inline void rtw89_tx_wait_list_clear(struct rtw89_dev *rtwdev) +{ + struct rtw89_tx_wait_info *wait, *tmp; + + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + list_for_each_entry_safe(wait, tmp, &rtwdev->tx_waits, list) { + if (!completion_done(&wait->completion)) + continue; + list_del(&wait->list); + rtw89_tx_wait_release(wait); + } +} + static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { @@ -5744,6 +5770,7 @@ static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev, static inline void rtw89_hci_reset(struct rtw89_dev *rtwdev) { rtwdev->hci.ops->reset(rtwdev); + rtw89_tx_wait_list_clear(rtwdev); } static inline int rtw89_hci_start(struct rtw89_dev *rtwdev) @@ -6745,11 +6772,12 @@ static inline struct sk_buff *rtw89_alloc_skb_for_rx(struct rtw89_dev *rtwdev, return dev_alloc_skb(length); } -static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev, +static inline bool rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev, struct rtw89_tx_skb_data *skb_data, bool tx_done) { struct rtw89_tx_wait_info *wait; + bool ret = false; rcu_read_lock(); @@ -6757,11 +6785,14 @@ static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev, if (!wait) goto out; + ret = true; wait->tx_done = tx_done; - complete(&wait->completion); + /* Don't access skb anymore after completion */ + complete_all(&wait->completion); out: rcu_read_unlock(); + return ret; } static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev) @@ -6787,12 +6818,14 @@ static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev) } int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel); + struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel, + struct 
rtw89_tx_wait_info *wait); int rtw89_h2c_tx(struct rtw89_dev *rtwdev, struct sk_buff *skb, bool fwdl); void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel); int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, - int qsel, unsigned int timeout); + struct rtw89_tx_wait_info *wait, int qsel, + unsigned int timeout); void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info, void *txdesc); diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 3a1a2b243adf0..d1f9bd41f8b5d 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -36,7 +36,7 @@ static void rtw89_ops_tx(struct ieee80211_hw *hw, return; } - ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel); + ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel, NULL); if (ret) { rtw89_err(rtwdev, "failed to transmit skb: %d\n", ret); ieee80211_free_txskb(hw, skb); diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index e203d3b2a8274..a87e1778a0d41 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -458,7 +458,8 @@ static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); struct ieee80211_tx_info *info; - rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE); + if (rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE)) + return; info = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(info); @@ -1365,7 +1366,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, struct pci_dev *pdev = rtwpci->pdev; struct sk_buff *skb = tx_req->skb; struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); - struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); bool en_wd_info = desc_info->en_wd_info; u32 txwd_len; u32 txwp_len; @@ -1381,7 +1381,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, } tx_data->dma = dma; - rcu_assign_pointer(skb_data->wait, NULL); txwp_len = sizeof(*txwp_info); txwd_len = chip->txwd_body_size; diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 02c2ac12f197a..2a303a758e276 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -207,7 +207,6 @@ static void rtw89_ser_hdl_work(struct work_struct *work) static int ser_send_msg(struct rtw89_ser *ser, u8 event) { - struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); struct ser_msg *msg = NULL; if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags)) @@ -223,7 +222,7 @@ static int ser_send_msg(struct rtw89_ser *ser, u8 event) list_add(&msg->list, &ser->msg_q); spin_unlock_irq(&ser->msg_q_lock); - ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work); + schedule_work(&ser->ser_hdl_work); return 0; } @@ -484,6 +483,7 @@ static void ser_l1_reset_pre_st_hdl(struct rtw89_ser *ser, u8 evt) static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt) { struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + struct wiphy *wiphy = rtwdev->hw->wiphy; switch (evt) { case SER_EV_STATE_IN: @@ -496,7 +496,9 @@ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt) } drv_stop_rx(ser); + wiphy_lock(wiphy); drv_trx_reset(ser); + wiphy_unlock(wiphy); /* wait m3 */ hal_send_m2_event(ser); diff --git a/drivers/net/wireless/virtual/virt_wifi.c 
b/drivers/net/wireless/virtual/virt_wifi.c index 4ee3740804667..a77a27c36bdbe 100644 --- a/drivers/net/wireless/virtual/virt_wifi.c +++ b/drivers/net/wireless/virtual/virt_wifi.c @@ -277,7 +277,9 @@ static void virt_wifi_connect_complete(struct work_struct *work) priv->is_connected = true; /* Schedules an event that acquires the rtnl lock. */ - cfg80211_connect_result(priv->upperdev, requested_bss, NULL, 0, NULL, 0, + cfg80211_connect_result(priv->upperdev, + priv->is_connected ? fake_router_bssid : NULL, + NULL, 0, NULL, 0, status, GFP_KERNEL); netif_carrier_on(priv->upperdev); } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 561dd08022c06..24cff8b044923 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -131,12 +131,14 @@ void nvme_mpath_start_request(struct request *rq) struct nvme_ns *ns = rq->q->queuedata; struct gendisk *disk = ns->head->disk; - if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) { + if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) && + !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) { atomic_inc(&ns->ctrl->nr_active); nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE; } - if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq)) + if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) || + (nvme_req(rq)->flags & NVME_MPATH_IO_STATS)) return; nvme_req(rq)->flags |= NVME_MPATH_IO_STATS; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index fdc9f1df0578b..ff004350dc2c3 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3140,10 +3140,12 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND * because of high power consumption (> 2 Watt) in s2idle * sleep. Only some boards with Intel CPU are affected. 
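* The dmi_match() checks below limit the exclusion to the boards * known to be affected.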
+ * (Note for testing: Samsung 990 Evo Plus has the same PCI ID) */ if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") || dmi_match(DMI_BOARD_NAME, "GMxPXxx") || dmi_match(DMI_BOARD_NAME, "GXxMRXx") || + dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") || dmi_match(DMI_BOARD_NAME, "PH4PG31") || dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") || dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71")) diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 83a6b18b01ada..77df3432dfb78 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1075,6 +1075,9 @@ static void nvme_tcp_write_space(struct sock *sk) queue = sk->sk_user_data; if (likely(queue && sk_stream_is_writeable(sk))) { clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + /* Ensure pending TLS partial records are retried */ + if (nvme_tcp_queue_tls(queue)) + queue->write_space(sk); queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } read_unlock_bh(&sk->sk_callback_lock); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index ef8c5961e10c8..0ade23610ae64 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -54,6 +54,8 @@ struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ int ls_error; struct list_head lsreq_list; /* tgtport->ls_req_list */ bool req_queued; + + struct work_struct put_work; }; @@ -111,8 +113,6 @@ struct nvmet_fc_tgtport { struct nvmet_fc_port_entry *pe; struct kref ref; u32 max_sg_cnt; - - struct work_struct put_work; }; struct nvmet_fc_port_entry { @@ -235,12 +235,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); -static void nvmet_fc_put_tgtport_work(struct work_struct *work) +static void nvmet_fc_put_lsop_work(struct work_struct *work) { - struct nvmet_fc_tgtport *tgtport = - container_of(work, struct nvmet_fc_tgtport, put_work); + struct nvmet_fc_ls_req_op *lsop = + container_of(work, struct nvmet_fc_ls_req_op, put_work); - nvmet_fc_tgtport_put(tgtport); + nvmet_fc_tgtport_put(lsop->tgtport); + kfree(lsop); } static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, @@ -367,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) DMA_BIDIRECTIONAL); out_putwork: - queue_work(nvmet_wq, &tgtport->put_work); + queue_work(nvmet_wq, &lsop->put_work); } static int @@ -388,6 +389,7 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, lsreq->done = done; lsop->req_queued = false; INIT_LIST_HEAD(&lsop->lsreq_list); + INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work); lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, lsreq->rqstlen + lsreq->rsplen, @@ -447,8 +449,6 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) __nvmet_fc_finish_ls_req(lsop); /* fc-nvme target doesn't care about success or failure of cmd */ - - kfree(lsop); } /* @@ -1412,7 +1412,6 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); newrec->max_sg_cnt = template->max_sgl_segments; - INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); ret = nvmet_fc_alloc_ls_iodlist(newrec); if (ret) { diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c index 65d39e19f6eca..f381ce1e84bd3 100644 --- a/drivers/nvmem/layouts.c +++ b/drivers/nvmem/layouts.c @@ -45,11 +45,24 @@ static void 
nvmem_layout_bus_remove(struct device *dev) return drv->remove(layout); } +static int nvmem_layout_bus_uevent(const struct device *dev, + struct kobj_uevent_env *env) +{ + int ret; + + ret = of_device_uevent_modalias(dev, env); + if (ret != -ENODEV) + return ret; + + return 0; +} + static const struct bus_type nvmem_layout_bus_type = { .name = "nvmem-layout", .match = nvmem_layout_bus_match, .probe = nvmem_layout_bus_probe, .remove = nvmem_layout_bus_remove, + .uevent = nvmem_layout_bus_uevent, }; int __nvmem_layout_driver_register(struct nvmem_layout_driver *drv, diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 9a72f75e5c2d8..63b5b435bd3ae 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -4191,6 +4191,7 @@ static int of_unittest_pci_node_verify(struct pci_dev *pdev, bool add) unittest(!np, "Child device tree node is not removed\n"); child_dev = device_find_any_child(&pdev->dev); unittest(!child_dev, "Child device is not removed\n"); + put_device(child_dev); } failed: diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index bae829ac759e1..c014220d2b24b 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -277,6 +277,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) if (!ret) offset = args.args[0]; + /* + * The PCIe Controller's registers have different "reset-values" + * depending on the "strap" settings programmed into the PCIEn_CTRL + * register within the CTRL_MMR memory-mapped register space. + * The registers latch onto a "reset-value" based on the "strap" + * settings sampled after the PCIe Controller is powered on. + * To ensure that the "reset-values" are sampled accurately, power + * off the PCIe Controller before programming the "strap" settings + * and power it on after that. The runtime PM APIs, namely + * pm_runtime_put_sync() and pm_runtime_get_sync(), will decrement and + * increment the usage counter respectively, causing GENPD to power off + * and power on the PCIe Controller. 
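+ * Both calls below are error-checked: a failure to power the + * Controller off or back on aborts j721e_pcie_ctrl_init() with the + * error code.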
+ */ + ret = pm_runtime_put_sync(dev); + if (ret < 0) { + dev_err(dev, "Failed to power off PCIe Controller\n"); + return ret; + } + ret = j721e_pcie_set_mode(pcie, syscon, offset); if (ret < 0) { dev_err(dev, "Failed to set pci mode\n"); @@ -295,6 +314,12 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) return ret; } + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "Failed to power on PCIe Controller\n"); + return ret; + } + /* Enable ACSPCIE refclk output if the optional property exists */ syscon = syscon_regmap_lookup_by_phandle_optional(node, "ti,syscon-acspcie-proxy-ctrl"); @@ -531,7 +556,7 @@ static int j721e_pcie_probe(struct platform_device *pdev) ret = j721e_pcie_ctrl_init(pcie); if (ret < 0) { - dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n"); + dev_err_probe(dev, ret, "j721e_pcie_ctrl_init failed\n"); goto err_get_sync; } diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 44b34559de1ac..4f75d13fe1dee 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -1202,8 +1202,8 @@ static int ks_pcie_probe(struct platform_device *pdev) if (irq < 0) return irq; - ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED, - "ks-pcie-error-irq", ks_pcie); + ret = devm_request_irq(dev, irq, ks_pcie_err_irq_handler, IRQF_SHARED, + "ks-pcie-error-irq", ks_pcie); if (ret < 0) { dev_err(dev, "failed to request error IRQ %d\n", irq); diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index 5d77a01648606..397c2f9477a15 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -182,8 +182,17 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar) return ret; } - if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) + if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) { reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc); + /* + * R-Car V4H Reference Manual R19UH0186EJ0130 Rev.1.30 Apr. + * 21, 2025 page 585 Figure 9.3.2 Software Reset flow (B) + * indicates that for peripherals in HSC domain, after + * reset has been asserted by writing a matching reset bit + * into register SRCR, it is mandatory to wait 1ms. + */ + fsleep(1000); + } val = readl(rcar->base + PCIEMSR0); if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) { @@ -204,6 +213,19 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar) if (ret) goto err_unprepare; + /* + * Ensure the reset is latched and the core is ready for DBI access. + * On R-Car V4H, the PCIe reset is asynchronous and does not take + * effect immediately, but needs a short time to complete. In case + * DBI access happens in that short time, that access generates an + * SError. To make sure that condition can never happen, read back the + * state of the reset, which should turn the asynchronous reset into + * a synchronous one, and wait a little over 1ms to add additional + * safety margin. 
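+ * The readback below exists purely for that ordering side effect; + * the return value of reset_control_status() is deliberately + * ignored.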
+ */ + reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc); + fsleep(1000); + if (rcar->drvdata->additional_common_init) rcar->drvdata->additional_common_init(rcar); @@ -701,7 +723,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22)); rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16)); rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6)); - rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(11, 0)); + rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(1, 0)); rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15)); rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26)); rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0); @@ -711,7 +733,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable val &= ~APP_HOLD_PHY_RST; writel(val, rcar->base + PCIERSTCTRL1); - ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000); + ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, val & BIT(18), 100, 10000); if (ret < 0) return ret; diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index ced3b7e7bdade..c2d626b090e3c 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -1205,6 +1205,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, struct mrq_uphy_response resp; struct tegra_bpmp_message msg; struct mrq_uphy_request req; + int err; /* * Controller-5 doesn't need to have its state set by BPMP-FW in @@ -1227,7 +1228,13 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, msg.rx.data = &resp; msg.rx.size = sizeof(resp); - return tegra_bpmp_transfer(pcie->bpmp, &msg); + err = tegra_bpmp_transfer(pcie->bpmp, &msg); + if (err) + return err; + if (msg.rx.ret) + return -EINVAL; + + return 0; } static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, @@ -1236,6 +1243,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, struct mrq_uphy_response resp; struct tegra_bpmp_message msg; struct mrq_uphy_request req; + int err; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); @@ -1255,7 +1263,13 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, msg.rx.data = &resp; msg.rx.size = sizeof(resp); - return tegra_bpmp_transfer(pcie->bpmp, &msg); + err = tegra_bpmp_transfer(pcie->bpmp, &msg); + if (err) + return err; + if (msg.rx.ret) + return -EINVAL; + + return 0; } static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) @@ -1721,9 +1735,9 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) ret); } - ret = tegra_pcie_bpmp_set_pll_state(pcie, false); + ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false); if (ret) - dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); + dev_err(pcie->dev, "Failed to disable controller: %d\n", ret); pcie->ep_state = EP_STATE_DISABLED; dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); @@ -1940,6 +1954,15 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) return IRQ_HANDLED; } +static void tegra_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar; + + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) + dw_pcie_ep_reset_bar(pci, bar); +}; + static int 
tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq) { /* Tegra194 supports only INTA */ @@ -1954,10 +1977,10 @@ static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq) static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq) { - if (unlikely(irq > 31)) + if (unlikely(irq > 32)) return -EINVAL; - appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1); + appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1); return 0; } @@ -2016,6 +2039,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep) } static const struct dw_pcie_ep_ops pcie_ep_ops = { + .init = tegra_pcie_ep_init, .raise_irq = tegra_pcie_ep_raise_irq, .get_features = tegra_pcie_ep_get_features, }; diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index d7517c3976e7f..927ef421c4fc9 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -14,6 +14,7 @@ */ #include +#include #include #include #include @@ -269,7 +270,7 @@ struct tegra_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; struct mutex map_lock; - spinlock_t mask_lock; + raw_spinlock_t mask_lock; void *virt; dma_addr_t phys; int irq; @@ -1343,7 +1344,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port) unsigned int i; int err; - port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL); + port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL); if (!port->phys) return -ENOMEM; @@ -1604,14 +1605,13 @@ static void tegra_msi_irq_mask(struct irq_data *d) struct tegra_msi *msi = irq_data_get_irq_chip_data(d); struct tegra_pcie *pcie = msi_to_pcie(msi); unsigned int index = d->hwirq / 32; - unsigned long flags; u32 value; - spin_lock_irqsave(&msi->mask_lock, flags); - value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); - value &= ~BIT(d->hwirq % 32); - afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); + value &= ~BIT(d->hwirq % 32); + afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); + } } static void tegra_msi_irq_unmask(struct irq_data *d) @@ -1619,14 +1619,13 @@ static void tegra_msi_irq_unmask(struct irq_data *d) struct tegra_msi *msi = irq_data_get_irq_chip_data(d); struct tegra_pcie *pcie = msi_to_pcie(msi); unsigned int index = d->hwirq / 32; - unsigned long flags; u32 value; - spin_lock_irqsave(&msi->mask_lock, flags); - value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); - value |= BIT(d->hwirq % 32); - afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); + value |= BIT(d->hwirq % 32); + afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); + } } static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -1736,7 +1735,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) int err; mutex_init(&msi->map_lock); - spin_lock_init(&msi->mask_lock); + raw_spin_lock_init(&msi->mask_lock); if (IS_ENABLED(CONFIG_PCI_MSI)) { err = tegra_allocate_domains(msi); diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 3dd653f3d7841..a90beedd8e24e 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -12,6 +12,7 @@ */ #include +#include #include #include #include @@ -37,7 +38,7 @@ struct rcar_msi { 
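Stepping back to the pcie-tegra194.c hunks above: tegra_bpmp_transfer() only reports whether the IPC round trip happened, while the firmware's own verdict lands in msg.rx.ret, so both must be checked. A condensed sketch of that two-level check (types are from the Tegra BPMP API; the -EINVAL mapping follows the hunk):

#include <linux/errno.h>
#include <soc/tegra/bpmp.h>

static int demo_bpmp_transfer_checked(struct tegra_bpmp *bpmp,
                                      struct tegra_bpmp_message *msg)
{
        int err = tegra_bpmp_transfer(bpmp, msg);

        if (err)                /* transport failed: no reply arrived */
                return err;
        if (msg->rx.ret)        /* firmware replied but rejected the request */
                return -EINVAL;

        return 0;
}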
DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; struct mutex map_lock; - spinlock_t mask_lock; + raw_spinlock_t mask_lock; int irq1; int irq2; }; @@ -51,20 +52,13 @@ struct rcar_pcie_host { int (*phy_init_fn)(struct rcar_pcie_host *host); }; -static DEFINE_SPINLOCK(pmsr_lock); - static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base) { - unsigned long flags; u32 pmsr, val; int ret = 0; - spin_lock_irqsave(&pmsr_lock, flags); - - if (!pcie_base || pm_runtime_suspended(pcie_dev)) { - ret = -EINVAL; - goto unlock_exit; - } + if (!pcie_base || pm_runtime_suspended(pcie_dev)) + return -EINVAL; pmsr = readl(pcie_base + PMSR); @@ -86,8 +80,6 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base) writel(L1FAEG | PMEL1RX, pcie_base + PMSR); } -unlock_exit: - spin_unlock_irqrestore(&pmsr_lock, flags); return ret; } @@ -634,28 +626,26 @@ static void rcar_msi_irq_mask(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; - unsigned long flags; u32 value; - spin_lock_irqsave(&msi->mask_lock, flags); - value = rcar_pci_read_reg(pcie, PCIEMSIIER); - value &= ~BIT(d->hwirq); - rcar_pci_write_reg(pcie, value, PCIEMSIIER); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = rcar_pci_read_reg(pcie, PCIEMSIIER); + value &= ~BIT(d->hwirq); + rcar_pci_write_reg(pcie, value, PCIEMSIIER); + } } static void rcar_msi_irq_unmask(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; - unsigned long flags; u32 value; - spin_lock_irqsave(&msi->mask_lock, flags); - value = rcar_pci_read_reg(pcie, PCIEMSIIER); - value |= BIT(d->hwirq); - rcar_pci_write_reg(pcie, value, PCIEMSIIER); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = rcar_pci_read_reg(pcie, PCIEMSIIER); + value |= BIT(d->hwirq); + rcar_pci_write_reg(pcie, value, PCIEMSIIER); + } } static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -765,7 +755,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) int err; mutex_init(&msi->map_lock); - spin_lock_init(&msi->mask_lock); + raw_spin_lock_init(&msi->mask_lock); err = of_address_to_resource(dev->of_node, 0, &res); if (err) diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index a8ae14474dd0a..2e7356c28b5e4 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -721,9 +721,10 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | E_ECAM_CR_ENABLE, E_ECAM_CONTROL); - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | - (NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT), - E_ECAM_CONTROL); + ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL); + ecam_val &= ~E_ECAM_SIZE_LOC; + ecam_val |= NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT; + nwl_bridge_writel(pcie, ecam_val, E_ECAM_CONTROL); nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base), E_ECAM_BASE_LO); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 21aa3709e2577..eeb7fbc2d67a5 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -282,17 +282,20 @@ static void pci_epf_test_clean_dma_chan(struct 
pci_epf_test *epf_test) if (!epf_test->dma_supported) return; - dma_release_channel(epf_test->dma_chan_tx); - if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) { + if (epf_test->dma_chan_tx) { + dma_release_channel(epf_test->dma_chan_tx); + if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) { + epf_test->dma_chan_tx = NULL; + epf_test->dma_chan_rx = NULL; + return; + } epf_test->dma_chan_tx = NULL; - epf_test->dma_chan_rx = NULL; - return; } - dma_release_channel(epf_test->dma_chan_rx); - epf_test->dma_chan_rx = NULL; - - return; + if (epf_test->dma_chan_rx) { + dma_release_channel(epf_test->dma_chan_rx); + epf_test->dma_chan_rx = NULL; + } } static void pci_epf_test_print_rate(struct pci_epf_test *epf_test, diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index aaa33e8dc4c97..44a33df3c69c2 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -581,15 +581,18 @@ static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs) if (dev->no_vf_scan) return 0; + pci_lock_rescan_remove(); for (i = 0; i < num_vfs; i++) { rc = pci_iov_add_virtfn(dev, i); if (rc) goto failed; } + pci_unlock_rescan_remove(); return 0; failed: while (i--) pci_iov_remove_virtfn(dev, i); + pci_unlock_rescan_remove(); return rc; } @@ -709,8 +712,10 @@ static void sriov_del_vfs(struct pci_dev *dev) struct pci_sriov *iov = dev->sriov; int i; + pci_lock_rescan_remove(); for (i = 0; i < iov->num_VFs; i++) pci_iov_remove_virtfn(dev, i); + pci_unlock_rescan_remove(); } static void sriov_disable(struct pci_dev *dev) diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 99c58ee09fbb0..0cd8a75e22580 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -122,6 +122,8 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge) { + bool ret = false; + if (ACPI_HANDLE(&host_bridge->dev)) { union acpi_object *obj; @@ -135,11 +137,11 @@ bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge) 1, DSM_PCI_PRESERVE_BOOT_CONFIG, NULL, ACPI_TYPE_INTEGER); if (obj && obj->integer.value == 0) - return true; + ret = true; ACPI_FREE(obj); } - return false; + return ret; } /* _HPX PCI Setting Record (Type 0); same as _HPP */ diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 35270172c8331..0c3aa91d1aee0 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1600,6 +1600,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type) switch (err_type) { case PCI_ERS_RESULT_NONE: case PCI_ERS_RESULT_CAN_RECOVER: + case PCI_ERS_RESULT_NEED_RESET: envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY"; envp[idx++] = "DEVICE_ONLINE=0"; break; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5af4a804a4f89..96f9cf9f8d643 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -200,8 +200,14 @@ static ssize_t max_link_width_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); + ssize_t ret; - return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev)); + /* We read PCI_EXP_LNKCAP, so we need the device to be accessible. 
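Worth spelling out for the pci-acpi.c hunk above: the old early return skipped ACPI_FREE(), leaking the _DSM result object. Below is a sketch of the single-exit shape the fix adopts; handle, guid, rev and func are placeholders here, and since ACPI_FREE(NULL) is a no-op the free can be unconditional:

#include <linux/acpi.h>

static bool demo_dsm_reports_zero(acpi_handle handle, const guid_t *guid,
                                  u64 rev, u64 func)
{
        union acpi_object *obj;
        bool ret = false;

        obj = acpi_evaluate_dsm_typed(handle, guid, rev, func, NULL,
                                      ACPI_TYPE_INTEGER);
        if (obj && obj->integer.value == 0)
                ret = true;     /* record the answer... */

        ACPI_FREE(obj);         /* ...then free on every path */
        return ret;
}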
*/ + pci_config_pm_runtime_get(pdev); + ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev)); + pci_config_pm_runtime_put(pdev); + + return ret; } static DEVICE_ATTR_RO(max_link_width); @@ -213,7 +219,10 @@ static ssize_t current_link_speed_show(struct device *dev, int err; enum pci_bus_speed speed; + pci_config_pm_runtime_get(pci_dev); err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); + pci_config_pm_runtime_put(pci_dev); + if (err) return -EINVAL; @@ -230,7 +239,10 @@ static ssize_t current_link_width_show(struct device *dev, u16 linkstat; int err; + pci_config_pm_runtime_get(pci_dev); err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); + pci_config_pm_runtime_put(pci_dev); + if (err) return -EINVAL; @@ -246,7 +258,10 @@ static ssize_t secondary_bus_number_show(struct device *dev, u8 sec_bus; int err; + pci_config_pm_runtime_get(pci_dev); err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus); + pci_config_pm_runtime_put(pci_dev); + if (err) return -EINVAL; @@ -262,7 +277,10 @@ static ssize_t subordinate_bus_number_show(struct device *dev, u8 sub_bus; int err; + pci_config_pm_runtime_get(pci_dev); err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus); + pci_config_pm_runtime_put(pci_dev); + if (err) return -EINVAL; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 3d1365f558d3a..0dd548e2b3676 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -6048,6 +6048,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; int ret; + unsigned int firstbit; struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) @@ -6065,7 +6066,10 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) rq = mps; } - v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, ffs(rq) - 8); + firstbit = ffs(rq); + if (firstbit < 8) + return -EINVAL; + v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8); if (bridge->no_inc_mrrs) { int max_mrrs = pcie_get_readrq(dev); diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 13b8586924ead..e5cbea3a4968b 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -38,7 +38,7 @@ #define AER_ERROR_SOURCES_MAX 128 #define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */ -#define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/ +#define AER_MAX_TYPEOF_UNCOR_ERRS 32 /* as per PCI_ERR_UNCOR_STATUS*/ struct aer_err_source { u32 status; /* PCI_ERR_ROOT_STATUS */ @@ -510,11 +510,11 @@ static const char *aer_uncorrectable_error_string[] = { "AtomicOpBlocked", /* Bit Position 24 */ "TLPBlockedErr", /* Bit Position 25 */ "PoisonTLPBlocked", /* Bit Position 26 */ - NULL, /* Bit Position 27 */ - NULL, /* Bit Position 28 */ - NULL, /* Bit Position 29 */ - NULL, /* Bit Position 30 */ - NULL, /* Bit Position 31 */ + "DMWrReqBlocked", /* Bit Position 27 */ + "IDECheck", /* Bit Position 28 */ + "MisIDETLP", /* Bit Position 29 */ + "PCRC_CHECK", /* Bit Position 30 */ + "TLPXlatBlocked", /* Bit Position 31 */ }; static const char *aer_agent_string[] = { diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 31090770fffcc..628d192de90b2 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -108,6 +108,12 @@ static int report_normal_detected(struct pci_dev *dev, void *data) return report_error_detected(dev, pci_channel_io_normal, data); } +static int report_perm_failure_detected(struct pci_dev *dev, void *data) +{ + pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT); + return 0; +} + static int 
report_mmio_enabled(struct pci_dev *dev, void *data) { struct pci_driver *pdrv; @@ -269,7 +275,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, failed: pci_walk_bridge(bridge, pci_pm_runtime_put, NULL); - pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT); + pci_walk_bridge(bridge, report_perm_failure_detected, NULL); /* TODO: Should kernel panic here? */ pci_info(bridge, "device recovery failed\n"); diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 978b239ec10bd..e6b0c8ec9c1fa 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -65,7 +65,7 @@ /* PMU registers occupy the 3rd 4KB page of each node's region */ #define CMN_PMU_OFFSET 0x2000 /* ...except when they don't :( */ -#define CMN_S3_DTM_OFFSET 0xa000 +#define CMN_S3_R1_DTM_OFFSET 0xa000 #define CMN_S3_PMU_OFFSET 0xd900 /* For most nodes, this is all there is */ @@ -233,6 +233,9 @@ enum cmn_revision { REV_CMN700_R1P0, REV_CMN700_R2P0, REV_CMN700_R3P0, + REV_CMNS3_R0P0 = 0, + REV_CMNS3_R0P1, + REV_CMNS3_R1P0, REV_CI700_R0P0 = 0, REV_CI700_R1P0, REV_CI700_R2P0, @@ -425,8 +428,8 @@ static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn) static int arm_cmn_pmu_offset(const struct arm_cmn *cmn, const struct arm_cmn_node *dn) { if (cmn->part == PART_CMN_S3) { - if (dn->type == CMN_TYPE_XP) - return CMN_S3_DTM_OFFSET; + if (cmn->rev >= REV_CMNS3_R1P0 && dn->type == CMN_TYPE_XP) + return CMN_S3_R1_DTM_OFFSET; return CMN_S3_PMU_OFFSET; } return CMN_PMU_OFFSET; diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 3569050f9cf37..abd23430dc033 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -96,7 +96,8 @@ struct arm_spe_pmu { #define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu)) /* Convert a free-running index from perf into an SPE buffer offset */ -#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT)) +#define PERF_IDX2OFF(idx, buf) \ + ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT)) /* Keep track of our dynamic hotplug state */ static enum cpuhp_state arm_spe_pmu_online; diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 918cdc31de572..e37682b280db5 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -198,7 +198,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event) return -EINVAL; hisi_pmu = to_hisi_pmu(event->pmu); - if (event->attr.config > hisi_pmu->check_event) + if ((event->attr.config & HISI_EVENTID_MASK) > hisi_pmu->check_event) return -EINVAL; if (hisi_pmu->on_cpu == -1) diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h index 25b2d43b72bf9..ab5d54170b416 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.h +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -43,7 +43,8 @@ return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config); \ } -#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff) +#define HISI_EVENTID_MASK GENMASK(7, 0) +#define HISI_GET_EVENTID(ev) ((ev)->hw.config_base & HISI_EVENTID_MASK) #define HISI_PMU_EVTYPE_BITS 8 #define HISI_PMU_EVTYPE_SHIFT(idx) ((idx) % 4 * HISI_PMU_EVTYPE_BITS) diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c index dddb66de6dba1..8d93a830ab8bf 100644 --- a/drivers/phy/cadence/cdns-dphy.c +++ b/drivers/phy/cadence/cdns-dphy.c @@ -30,6 +30,7 @@ #define DPHY_CMN_SSM DPHY_PMA_CMN(0x20) #define DPHY_CMN_SSM_EN BIT(0) +#define DPHY_CMN_SSM_CAL_WAIT_TIME GENMASK(8, 1) #define 
DPHY_CMN_TX_MODE_EN BIT(9) #define DPHY_CMN_PWM DPHY_PMA_CMN(0x40) @@ -79,6 +80,7 @@ struct cdns_dphy_cfg { u8 pll_ipdiv; u8 pll_opdiv; u16 pll_fbdiv; + u32 hs_clk_rate; unsigned int nlanes; }; @@ -99,6 +101,8 @@ struct cdns_dphy_ops { void (*set_pll_cfg)(struct cdns_dphy *dphy, const struct cdns_dphy_cfg *cfg); unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy); + int (*wait_for_pll_lock)(struct cdns_dphy *dphy); + int (*wait_for_cmn_ready)(struct cdns_dphy *dphy); }; struct cdns_dphy { @@ -108,6 +112,8 @@ struct cdns_dphy { struct clk *pll_ref_clk; const struct cdns_dphy_ops *ops; struct phy *phy; + bool is_configured; + bool is_powered; }; /* Order of bands is important since the index is the band number. */ @@ -154,6 +160,9 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy, cfg->pll_ipdiv, pll_ref_hz); + cfg->hs_clk_rate = div_u64((u64)pll_ref_hz * cfg->pll_fbdiv, + 2 * cfg->pll_opdiv * cfg->pll_ipdiv); + return 0; } @@ -191,6 +200,16 @@ static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy) return dphy->ops->get_wakeup_time_ns(dphy); } +static int cdns_dphy_wait_for_pll_lock(struct cdns_dphy *dphy) +{ + return dphy->ops->wait_for_pll_lock ? dphy->ops->wait_for_pll_lock(dphy) : 0; +} + +static int cdns_dphy_wait_for_cmn_ready(struct cdns_dphy *dphy) +{ + return dphy->ops->wait_for_cmn_ready ? dphy->ops->wait_for_cmn_ready(dphy) : 0; +} + static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy) { /* Default wakeup time is 800 ns (in a simulated environment). */ @@ -232,7 +251,6 @@ static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy) static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy, const struct cdns_dphy_cfg *cfg) { - u32 status; /* * set the PWM and PLL Byteclk divider settings to recommended values @@ -249,13 +267,6 @@ static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy, writel(DPHY_TX_J721E_WIZ_LANE_RSTB, dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL); - - readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status, - (status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US); - - readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status, - (status & DPHY_TX_WIZ_O_CMN_READY), 0, - POLL_TIMEOUT_US); } static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div) @@ -263,6 +274,23 @@ static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div) writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ); } +static int cdns_dphy_j721e_wait_for_pll_lock(struct cdns_dphy *dphy) +{ + u32 status; + + return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status, + status & DPHY_TX_WIZ_PLL_LOCK, 0, POLL_TIMEOUT_US); +} + +static int cdns_dphy_j721e_wait_for_cmn_ready(struct cdns_dphy *dphy) +{ + u32 status; + + return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status, + status & DPHY_TX_WIZ_O_CMN_READY, 0, + POLL_TIMEOUT_US); +} + /* * This is the reference implementation of DPHY hooks. 
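For readers unfamiliar with the helper used by the two new wait routines above: readl_poll_timeout() re-reads a register until a condition becomes true, returning 0 on success and -ETIMEDOUT otherwise. A self-contained sketch with a hypothetical register offset and bit:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define DEMO_STATUS     0x0f8           /* hypothetical offset */
#define DEMO_READY      BIT(18)         /* hypothetical ready bit */

static int demo_wait_ready(void __iomem *base)
{
        u32 status;

        /* busy-poll (0 us sleep between reads), give up after 10 ms */
        return readl_poll_timeout(base + DEMO_STATUS, status,
                                  status & DEMO_READY, 0, 10000);
}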
Specific integration of * this IP may have to re-implement some of them depending on how they decided @@ -278,6 +306,8 @@ static const struct cdns_dphy_ops j721e_dphy_ops = { .get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns, .set_pll_cfg = cdns_dphy_j721e_set_pll_cfg, .set_psm_div = cdns_dphy_j721e_set_psm_div, + .wait_for_pll_lock = cdns_dphy_j721e_wait_for_pll_lock, + .wait_for_cmn_ready = cdns_dphy_j721e_wait_for_cmn_ready, }; static int cdns_dphy_config_from_opts(struct phy *phy, @@ -297,6 +327,7 @@ static int cdns_dphy_config_from_opts(struct phy *phy, if (ret) return ret; + opts->hs_clk_rate = cfg->hs_clk_rate; opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000; return 0; @@ -334,21 +365,36 @@ static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode, static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts) { struct cdns_dphy *dphy = phy_get_drvdata(phy); - struct cdns_dphy_cfg cfg = { 0 }; - int ret, band_ctrl; - unsigned int reg; + int ret; - ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg); - if (ret) - return ret; + ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &dphy->cfg); + if (!ret) + dphy->is_configured = true; + + return ret; +} + +static int cdns_dphy_power_on(struct phy *phy) +{ + struct cdns_dphy *dphy = phy_get_drvdata(phy); + int ret; + u32 reg; + + if (!dphy->is_configured || dphy->is_powered) + return -EINVAL; + + clk_prepare_enable(dphy->psm_clk); + clk_prepare_enable(dphy->pll_ref_clk); /* * Configure the internal PSM clk divider so that the DPHY has a * 1MHz clk (or something close). */ ret = cdns_dphy_setup_psm(dphy); - if (ret) - return ret; + if (ret) { + dev_err(&dphy->phy->dev, "Failed to setup PSM with error %d\n", ret); + goto err_power_on; + } /* * Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes @@ -363,40 +409,61 @@ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts) * Configure the DPHY PLL that will be used to generate the TX byte * clk. */ - cdns_dphy_set_pll_cfg(dphy, &cfg); + cdns_dphy_set_pll_cfg(dphy, &dphy->cfg); - band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate); - if (band_ctrl < 0) - return band_ctrl; + ret = cdns_dphy_tx_get_band_ctrl(dphy->cfg.hs_clk_rate); + if (ret < 0) { + dev_err(&dphy->phy->dev, "Failed to get band control value with error %d\n", ret); + goto err_power_on; + } - reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) | - FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl); + reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, ret) | + FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, ret); writel(reg, dphy->regs + DPHY_BAND_CFG); - return 0; -} + /* Start TX state machine. */ + reg = readl(dphy->regs + DPHY_CMN_SSM); + writel((reg & DPHY_CMN_SSM_CAL_WAIT_TIME) | DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN, + dphy->regs + DPHY_CMN_SSM); -static int cdns_dphy_power_on(struct phy *phy) -{ - struct cdns_dphy *dphy = phy_get_drvdata(phy); + ret = cdns_dphy_wait_for_pll_lock(dphy); + if (ret) { + dev_err(&dphy->phy->dev, "Failed to lock PLL with error %d\n", ret); + goto err_power_on; + } - clk_prepare_enable(dphy->psm_clk); - clk_prepare_enable(dphy->pll_ref_clk); + ret = cdns_dphy_wait_for_cmn_ready(dphy); + if (ret) { + dev_err(&dphy->phy->dev, "O_CMN_READY signal failed to assert with error %d\n", + ret); + goto err_power_on; + } - /* Start TX state machine. 
*/ - writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN, - dphy->regs + DPHY_CMN_SSM); + dphy->is_powered = true; return 0; + +err_power_on: + clk_disable_unprepare(dphy->pll_ref_clk); + clk_disable_unprepare(dphy->psm_clk); + + return ret; } static int cdns_dphy_power_off(struct phy *phy) { struct cdns_dphy *dphy = phy_get_drvdata(phy); + u32 reg; clk_disable_unprepare(dphy->pll_ref_clk); clk_disable_unprepare(dphy->psm_clk); + /* Stop TX state machine. */ + reg = readl(dphy->regs + DPHY_CMN_SSM); + writel(reg & ~DPHY_CMN_SSM_EN, dphy->regs + DPHY_CMN_SSM); + + dphy->is_powered = false; + return 0; } diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c index 1ef6d9630f7e0..fbaeb7ca600d1 100644 --- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c +++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c @@ -122,6 +122,8 @@ struct rockchip_combphy_grfcfg { struct combphy_reg pipe_xpcs_phy_ready; struct combphy_reg pipe_pcie1l0_sel; struct combphy_reg pipe_pcie1l1_sel; + struct combphy_reg u3otg0_port_en; + struct combphy_reg u3otg1_port_en; }; struct rockchip_combphy_cfg { @@ -431,6 +433,14 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv) rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false); rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false); rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true); + switch (priv->id) { + case 0: + rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg0_port_en, true); + break; + case 1: + rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg1_port_en, true); + break; + } break; case PHY_TYPE_SATA: @@ -574,6 +584,8 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = { /* pipe-grf */ .pipe_con0_for_sata = { 0x0000, 15, 0, 0x00, 0x2220 }, .pipe_xpcs_phy_ready = { 0x0040, 2, 2, 0x00, 0x01 }, + .u3otg0_port_en = { 0x0104, 15, 0, 0x0181, 0x1100 }, + .u3otg1_port_en = { 0x0144, 15, 0, 0x0181, 0x1100 }, }; static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = { diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index 9171de657f978..a75762e4d2641 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -187,6 +187,9 @@ static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 }; static const unsigned int i2c_sck_c_dv19_pins[] = { GPIODV_19 }; static const unsigned int i2c_sda_c_dv18_pins[] = { GPIODV_18 }; +static const unsigned int i2c_sck_d_pins[] = { GPIOX_11 }; +static const unsigned int i2c_sda_d_pins[] = { GPIOX_10 }; + static const unsigned int eth_mdio_pins[] = { GPIOZ_0 }; static const unsigned int eth_mdc_pins[] = { GPIOZ_1 }; static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 }; @@ -411,6 +414,8 @@ static const struct meson_pmx_group meson_gxl_periphs_groups[] = { GPIO_GROUP(GPIO_TEST_N), /* Bank X */ + GROUP(i2c_sda_d, 5, 5), + GROUP(i2c_sck_d, 5, 4), GROUP(sdio_d0, 5, 31), GROUP(sdio_d1, 5, 30), GROUP(sdio_d2, 5, 29), @@ -651,6 +656,10 @@ static const char * const i2c_c_groups[] = { "i2c_sck_c", "i2c_sda_c", "i2c_sda_c_dv18", "i2c_sck_c_dv19", }; +static const char * const i2c_d_groups[] = { + "i2c_sck_d", "i2c_sda_d", +}; + static const char * const eth_groups[] = { "eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv", "eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3", @@ -777,6 +786,7 @@ static const struct meson_pmx_func meson_gxl_periphs_functions[] = { FUNCTION(i2c_a), 
FUNCTION(i2c_b), FUNCTION(i2c_c), + FUNCTION(i2c_d), FUNCTION(eth), FUNCTION(pwm_a), FUNCTION(pwm_b), diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 2c31e7f2a27a8..56e14193d678c 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -337,7 +337,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev, while (selector < nfuncs) { const char *fname = ops->get_function_name(pctldev, selector); - if (!strcmp(function, fname)) + if (fname && !strcmp(function, fname)) return selector; selector++; diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index bde58f5a743cb..698ab8cc970a6 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -1074,7 +1074,7 @@ static u32 rzg3s_oen_read(struct rzg2l_pinctrl *pctrl, unsigned int _pin) bit = rzg3s_pin_to_oen_bit(pctrl, _pin); if (bit < 0) - return bit; + return 0; return !(readb(pctrl->base + ETH_MODE) & BIT(bit)); } diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c index 29d16c9c1bd19..3a742f74ecd1d 100644 --- a/drivers/pinctrl/renesas/pinctrl.c +++ b/drivers/pinctrl/renesas/pinctrl.c @@ -726,7 +726,8 @@ static int sh_pfc_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group, struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev); const unsigned int *pins; unsigned int num_pins; - unsigned int i, ret; + unsigned int i; + int ret; pins = pmx->pfc->info->groups[group].pins; num_pins = pmx->pfc->info->groups[group].nr_pins; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index 7ffd2e193e425..02b53ae9d9aaf 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -393,10 +393,6 @@ extern const struct samsung_pinctrl_of_match_data exynosautov920_of_data; extern const struct samsung_pinctrl_of_match_data fsd_of_data; extern const struct samsung_pinctrl_of_match_data gs101_of_data; extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data; -extern const struct samsung_pinctrl_of_match_data s3c2412_of_data; -extern const struct samsung_pinctrl_of_match_data s3c2416_of_data; -extern const struct samsung_pinctrl_of_match_data s3c2440_of_data; -extern const struct samsung_pinctrl_of_match_data s3c2450_of_data; extern const struct samsung_pinctrl_of_match_data s5pv210_of_data; #endif /* __PINCTRL_SAMSUNG_H */ diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c index 8fcf38eed7f00..66deb475f807a 100644 --- a/drivers/platform/x86/amd/hsmp.c +++ b/drivers/platform/x86/amd/hsmp.c @@ -569,6 +569,11 @@ static ssize_t hsmp_metric_tbl_read(struct file *filp, struct kobject *kobj, if (!sock) return -EINVAL; + if (!sock->metric_tbl_addr) { + dev_err(sock->dev, "Metrics table address not available\n"); + return -ENOMEM; + } + /* Do not support lseek(), reads entire metric table */ if (count < bin_attr->size) { dev_err(sock->dev, "Wrong buffer size\n"); diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index 6f5437d210a61..9fd2829ee2ab4 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -233,6 +233,14 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"), } }, + { + .ident = "MECHREVO Yilong15Pro Series GM5HG7A", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MECHREVO"), + 
DMI_MATCH(DMI_PRODUCT_NAME, "Yilong15Pro Series GM5HG7A"), + } + }, /* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */ { .ident = "PCSpecialist Lafite Pro V 14M", @@ -242,6 +250,13 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"), } }, + { + .ident = "TUXEDO Stellaris Slim 15 AMD Gen6", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"), + } + }, { .ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10", .driver_data = &quirk_spurious_8042, diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index 719caa2a00f05..8a1e2268d301a 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -406,6 +406,7 @@ static const struct acpi_device_id amd_pmf_acpi_ids[] = { {"AMDI0103", 0}, {"AMDI0105", 0}, {"AMDI0107", 0}, + {"AMDI0108", 0}, { } }; MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids); diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c index 4b57102c7f627..6af6cf477c5b5 100644 --- a/drivers/platform/x86/lg-laptop.c +++ b/drivers/platform/x86/lg-laptop.c @@ -8,6 +8,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include @@ -75,6 +76,9 @@ MODULE_PARM_DESC(fw_debug, "Enable printing of firmware debug messages"); #define WMBB_USB_CHARGE 0x10B #define WMBB_BATT_LIMIT 0x10C +#define FAN_MODE_LOWER GENMASK(1, 0) +#define FAN_MODE_UPPER GENMASK(5, 4) + #define PLATFORM_NAME "lg-laptop" MODULE_ALIAS("wmi:" WMI_EVENT_GUID0); @@ -274,29 +278,19 @@ static ssize_t fan_mode_store(struct device *dev, struct device_attribute *attr, const char *buffer, size_t count) { - bool value; + unsigned long value; union acpi_object *r; - u32 m; int ret; - ret = kstrtobool(buffer, &value); + ret = kstrtoul(buffer, 10, &value); if (ret) return ret; + if (value >= 3) + return -EINVAL; - r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0); - if (!r) - return -EIO; - - if (r->type != ACPI_TYPE_INTEGER) { - kfree(r); - return -EIO; - } - - m = r->integer.value; - kfree(r); - r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4)); - kfree(r); - r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value); + r = lg_wmab(dev, WM_FAN_MODE, WM_SET, + FIELD_PREP(FAN_MODE_LOWER, value) | + FIELD_PREP(FAN_MODE_UPPER, value)); kfree(r); return count; @@ -305,7 +299,7 @@ static ssize_t fan_mode_store(struct device *dev, static ssize_t fan_mode_show(struct device *dev, struct device_attribute *attr, char *buffer) { - unsigned int status; + unsigned int mode; union acpi_object *r; r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0); @@ -317,10 +311,10 @@ static ssize_t fan_mode_show(struct device *dev, return -EIO; } - status = r->integer.value & 0x01; + mode = FIELD_GET(FAN_MODE_LOWER, r->integer.value); kfree(r); - return sysfs_emit(buffer, "%d\n", status); + return sysfs_emit(buffer, "%d\n", mode); } static ssize_t usb_charge_store(struct device *dev, diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c index f63c3c4104515..382dff8805c62 100644 --- a/drivers/power/supply/cw2015_battery.c +++ b/drivers/power/supply/cw2015_battery.c @@ -702,8 +702,7 @@ static int cw_bat_probe(struct i2c_client *client) if (!cw_bat->battery_workqueue) return -ENOMEM; - devm_delayed_work_autocancel(&client->dev, - &cw_bat->battery_delay_work, cw_bat_work); + devm_delayed_work_autocancel(&client->dev, &cw_bat->battery_delay_work, cw_bat_work); queue_delayed_work(cw_bat->battery_workqueue, 
&cw_bat->battery_delay_work, msecs_to_jiffies(10)); return 0; diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c index d7e520da76886..e2424efb266df 100644 --- a/drivers/power/supply/max77976_charger.c +++ b/drivers/power/supply/max77976_charger.c @@ -292,10 +292,10 @@ static int max77976_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_ONLINE: err = max77976_get_online(chg, &val->intval); break; - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: val->intval = MAX77976_CHG_CC_MAX; break; - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: err = max77976_get_integer(chg, CHG_CC, MAX77976_CHG_CC_MIN, MAX77976_CHG_CC_MAX, @@ -330,7 +330,7 @@ static int max77976_set_property(struct power_supply *psy, int err = 0; switch (psp) { - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: err = max77976_set_integer(chg, CHG_CC, MAX77976_CHG_CC_MIN, MAX77976_CHG_CC_MAX, @@ -355,7 +355,7 @@ static int max77976_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { switch (psp) { - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: return true; default: @@ -368,8 +368,8 @@ static enum power_supply_property max77976_psy_props[] = { POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_ONLINE, - POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, - POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c index 92d1b62ea239d..e9389876229ea 100644 --- a/drivers/pps/kapi.c +++ b/drivers/pps/kapi.c @@ -109,16 +109,13 @@ struct pps_device *pps_register_source(struct pps_source_info *info, if (err < 0) { pr_err("%s: unable to create char device\n", info->name); - goto kfree_pps; + goto pps_register_source_exit; } dev_dbg(&pps->dev, "new PPS source %s\n", info->name); return pps; -kfree_pps: - kfree(pps); - pps_register_source_exit: pr_err("%s: unable to register source\n", info->name); diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index 9463232af8d2e..c6b8b64782761 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c @@ -374,6 +374,7 @@ int pps_register_cdev(struct pps_device *pps) pps->info.name); err = -EBUSY; } + kfree(pps); goto out_unlock; } pps->id = err; @@ -383,13 +384,11 @@ int pps_register_cdev(struct pps_device *pps) pps->dev.devt = MKDEV(pps_major, pps->id); dev_set_drvdata(&pps->dev, pps); dev_set_name(&pps->dev, "pps%d", pps->id); + pps->dev.release = pps_device_destruct; err = device_register(&pps->dev); if (err) goto free_idr; - /* Override the release function with our own */ - pps->dev.release = pps_device_destruct; - pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major, pps->id); diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index efbd80db778d6..bd9919c01e502 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -2546,7 +2546,7 @@ ptp_ocp_sma_fb_init(struct ptp_ocp *bp) for (i = 0; i < OCP_SMA_NUM; i++) { bp->sma[i].fixed_fcn = true; bp->sma[i].fixed_dir = true; - bp->sma[1].dpll_prop.capabilities &= + bp->sma[i].dpll_prop.capabilities &= ~DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE; } return; diff 
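The two pps hunks above are really one lifetime fix: dev.release must be in place before device_register(), and once registration has been attempted the object may only be dropped via put_device(), never a direct kfree(). A generic sketch of that driver-core rule, with demo_* names hypothetical:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_obj {
        struct device dev;
};

static void demo_release(struct device *dev)
{
        kfree(container_of(dev, struct demo_obj, dev));
}

static int demo_register(struct demo_obj *obj)
{
        int err;

        obj->dev.release = demo_release;        /* before registration */

        err = device_register(&obj->dev);
        if (err) {
                /* a direct kfree() here would double-free via release */
                put_device(&obj->dev);
                return err;
        }

        return 0;
}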
--git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index b352df4cd3f97..f329263f33aa1 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -22,6 +22,7 @@ #define PTP_MAX_TIMESTAMPS 128 #define PTP_BUF_TIMESTAMPS 30 #define PTP_DEFAULT_MAX_VCLOCKS 20 +#define PTP_MAX_VCLOCKS_LIMIT (KMALLOC_MAX_SIZE/(sizeof(int))) #define PTP_MAX_CHANNELS 2048 enum { diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 6b1b8f57cd951..200eaf5006968 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -284,7 +284,7 @@ static ssize_t max_vclocks_store(struct device *dev, size_t size; u32 max; - if (kstrtou32(buf, 0, &max) || max == 0) + if (kstrtou32(buf, 0, &max) || max == 0 || max > PTP_MAX_VCLOCKS_LIMIT) return -EINVAL; if (max == ptp->max_vclocks) diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index 831aed228cafc..858d369913742 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -234,7 +234,7 @@ static int berlin_pwm_suspend(struct device *dev) for (i = 0; i < chip->npwm; i++) { struct berlin_pwm_channel *channel = &bpc->channel[i]; - channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE); + channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_EN); channel->ctrl = berlin_pwm_readl(bpc, i, BERLIN_PWM_CONTROL); channel->duty = berlin_pwm_readl(bpc, i, BERLIN_PWM_DUTY); channel->tcnt = berlin_pwm_readl(bpc, i, BERLIN_PWM_TCNT); @@ -262,7 +262,7 @@ static int berlin_pwm_resume(struct device *dev) berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL); berlin_pwm_writel(bpc, i, channel->duty, BERLIN_PWM_DUTY); berlin_pwm_writel(bpc, i, channel->tcnt, BERLIN_PWM_TCNT); - berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_ENABLE); + berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_EN); } return 0; diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index 0125e73b98dfb..7a86cb090f76f 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -36,7 +36,7 @@ #define CLKDIV_MAX 7 #define HSPCLKDIV_MAX 7 -#define PERIOD_MAX 0xFFFF +#define PERIOD_MAX 0x10000 /* compare module registers */ #define CMPA 0x12 @@ -65,14 +65,10 @@ #define AQCTL_ZRO_FRCHIGH BIT(1) #define AQCTL_ZRO_FRCTOGGLE (BIT(1) | BIT(0)) -#define AQCTL_CHANA_POLNORMAL (AQCTL_CAU_FRCLOW | AQCTL_PRD_FRCHIGH | \ - AQCTL_ZRO_FRCHIGH) -#define AQCTL_CHANA_POLINVERSED (AQCTL_CAU_FRCHIGH | AQCTL_PRD_FRCLOW | \ - AQCTL_ZRO_FRCLOW) -#define AQCTL_CHANB_POLNORMAL (AQCTL_CBU_FRCLOW | AQCTL_PRD_FRCHIGH | \ - AQCTL_ZRO_FRCHIGH) -#define AQCTL_CHANB_POLINVERSED (AQCTL_CBU_FRCHIGH | AQCTL_PRD_FRCLOW | \ - AQCTL_ZRO_FRCLOW) +#define AQCTL_CHANA_POLNORMAL (AQCTL_CAU_FRCLOW | AQCTL_ZRO_FRCHIGH) +#define AQCTL_CHANA_POLINVERSED (AQCTL_CAU_FRCHIGH | AQCTL_ZRO_FRCLOW) +#define AQCTL_CHANB_POLNORMAL (AQCTL_CBU_FRCLOW | AQCTL_ZRO_FRCHIGH) +#define AQCTL_CHANB_POLINVERSED (AQCTL_CBU_FRCHIGH | AQCTL_ZRO_FRCLOW) #define AQSFRC_RLDCSF_MASK (BIT(7) | BIT(6)) #define AQSFRC_RLDCSF_ZRO 0 @@ -108,7 +104,6 @@ struct ehrpwm_pwm_chip { unsigned long clk_rate; void __iomem *mmio_base; unsigned long period_cycles[NUM_PWM_CHANNEL]; - enum pwm_polarity polarity[NUM_PWM_CHANNEL]; struct clk *tbclk; struct ehrpwm_context ctx; }; @@ -166,7 +161,7 @@ static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div, *prescale_div = (1 << clkdiv) * (hspclkdiv ? 
(hspclkdiv * 2) : 1); - if (*prescale_div > rqst_prescaler) { + if (*prescale_div >= rqst_prescaler) { *tb_clk_div = (clkdiv << TBCTL_CLKDIV_SHIFT) | (hspclkdiv << TBCTL_HSPCLKDIV_SHIFT); return 0; @@ -177,51 +172,20 @@ static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div, return 1; } -static void configure_polarity(struct ehrpwm_pwm_chip *pc, int chan) -{ - u16 aqctl_val, aqctl_mask; - unsigned int aqctl_reg; - - /* - * Configure PWM output to HIGH/LOW level on counter - * reaches compare register value and LOW/HIGH level - * on counter value reaches period register value and - * zero value on counter - */ - if (chan == 1) { - aqctl_reg = AQCTLB; - aqctl_mask = AQCTL_CBU_MASK; - - if (pc->polarity[chan] == PWM_POLARITY_INVERSED) - aqctl_val = AQCTL_CHANB_POLINVERSED; - else - aqctl_val = AQCTL_CHANB_POLNORMAL; - } else { - aqctl_reg = AQCTLA; - aqctl_mask = AQCTL_CAU_MASK; - - if (pc->polarity[chan] == PWM_POLARITY_INVERSED) - aqctl_val = AQCTL_CHANA_POLINVERSED; - else - aqctl_val = AQCTL_CHANA_POLNORMAL; - } - - aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK; - ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val); -} - /* * period_ns = 10^9 * (ps_divval * period_cycles) / PWM_CLK_RATE * duty_ns = 10^9 * (ps_divval * duty_cycles) / PWM_CLK_RATE */ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - u64 duty_ns, u64 period_ns) + u64 duty_ns, u64 period_ns, enum pwm_polarity polarity) { struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); u32 period_cycles, duty_cycles; u16 ps_divval, tb_divval; unsigned int i, cmp_reg; unsigned long long c; + u16 aqctl_val, aqctl_mask; + unsigned int aqctl_reg; if (period_ns > NSEC_PER_SEC) return -ERANGE; @@ -231,15 +195,10 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, do_div(c, NSEC_PER_SEC); period_cycles = (unsigned long)c; - if (period_cycles < 1) { - period_cycles = 1; - duty_cycles = 1; - } else { - c = pc->clk_rate; - c = c * duty_ns; - do_div(c, NSEC_PER_SEC); - duty_cycles = (unsigned long)c; - } + c = pc->clk_rate; + c = c * duty_ns; + do_div(c, NSEC_PER_SEC); + duty_cycles = (unsigned long)c; /* * Period values should be the same for multiple PWM channels as IP uses @@ -265,52 +224,73 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, pc->period_cycles[pwm->hwpwm] = period_cycles; /* Configure clock prescaler to support low frequency PWM wave */ - if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval, + if (set_prescale_div(DIV_ROUND_UP(period_cycles, PERIOD_MAX), &ps_divval, &tb_divval)) { dev_err(pwmchip_parent(chip), "Unsupported values\n"); return -EINVAL; } - pm_runtime_get_sync(pwmchip_parent(chip)); - - /* Update clock prescaler values */ - ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval); - /* Update period & duty cycle with prescaler division */ period_cycles = period_cycles / ps_divval; duty_cycles = duty_cycles / ps_divval; - /* Configure shadow loading on Period register */ - ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW); + if (period_cycles < 1) + period_cycles = 1; - ehrpwm_write(pc->mmio_base, TBPRD, period_cycles); + pm_runtime_get_sync(pwmchip_parent(chip)); - /* Configure ehrpwm counter for up-count mode */ - ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK, - TBCTL_CTRMODE_UP); + /* Update clock prescaler values */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval); - if (pwm->hwpwm == 1) + if (pwm->hwpwm == 1) { /* Channel 1 configured with
compare B register */ cmp_reg = CMPB; - else + + aqctl_reg = AQCTLB; + aqctl_mask = AQCTL_CBU_MASK; + + if (polarity == PWM_POLARITY_INVERSED) + aqctl_val = AQCTL_CHANB_POLINVERSED; + else + aqctl_val = AQCTL_CHANB_POLNORMAL; + + /* if duty_cycle is big, don't toggle on CBU */ + if (duty_cycles > period_cycles) + aqctl_val &= ~AQCTL_CBU_MASK; + + } else { /* Channel 0 configured with compare A register */ cmp_reg = CMPA; - ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles); + aqctl_reg = AQCTLA; + aqctl_mask = AQCTL_CAU_MASK; - pm_runtime_put_sync(pwmchip_parent(chip)); + if (polarity == PWM_POLARITY_INVERSED) + aqctl_val = AQCTL_CHANA_POLINVERSED; + else + aqctl_val = AQCTL_CHANA_POLNORMAL; - return 0; -} + /* if duty_cycle is big, don't toggle on CAU */ + if (duty_cycles > period_cycles) + aqctl_val &= ~AQCTL_CAU_MASK; + } -static int ehrpwm_pwm_set_polarity(struct pwm_chip *chip, - struct pwm_device *pwm, - enum pwm_polarity polarity) -{ - struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); + aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK; + ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val); + + /* Configure shadow loading on Period register */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW); + + ehrpwm_write(pc->mmio_base, TBPRD, period_cycles - 1); - /* Configuration of polarity in hardware delayed, do at enable */ - pc->polarity[pwm->hwpwm] = polarity; + /* Configure ehrpwm counter for up-count mode */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK, + TBCTL_CTRMODE_UP); + + if (!(duty_cycles > period_cycles)) + ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles); + + pm_runtime_put_sync(pwmchip_parent(chip)); return 0; } @@ -339,9 +319,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); - /* Channels polarity can be configured from action qualifier module */ - configure_polarity(pc, pwm->hwpwm); - /* Enable TBCLK */ ret = clk_enable(pc->tbclk); if (ret) { @@ -391,12 +368,7 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); - if (pwm_is_enabled(pwm)) { - dev_warn(pwmchip_parent(chip), "Removing PWM device without disabling\n"); - pm_runtime_put_sync(pwmchip_parent(chip)); - } - - /* set period value to zero on free */ + /* Don't let a pwm without consumer block requests to the other channel */ pc->period_cycles[pwm->hwpwm] = 0; } @@ -411,10 +383,6 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, ehrpwm_pwm_disable(chip, pwm); enabled = false; } - - err = ehrpwm_pwm_set_polarity(chip, pwm, state->polarity); - if (err) - return err; } if (!state->enabled) { @@ -423,7 +391,7 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period); + err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period, state->polarity); if (err) return err; diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c index 9df726f10ad12..6d609c42e4793 100644 --- a/drivers/regulator/scmi-regulator.c +++ b/drivers/regulator/scmi-regulator.c @@ -257,7 +257,8 @@ static int process_scmi_regulator_of_node(struct scmi_device *sdev, struct device_node *np, struct scmi_regulator_info *rinfo) { - u32 dom, ret; + u32 dom; + int ret; ret = of_property_read_u32(np, "reg", &dom); if (ret) diff --git a/drivers/remoteproc/pru_rproc.c 
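To make the tiehrpwm arithmetic concrete: the period is first converted to functional-clock ticks, and because TBPRD is a 16-bit up-counter covering 0x10000 ticks per wrap (counting 0..TBPRD, hence the later `period_cycles - 1` write), the prescaler must be at least the tick count divided by 0x10000, rounded up. A standalone sketch of just that computation:

#include <linux/math.h>
#include <linux/math64.h>
#include <linux/time64.h>

#define DEMO_PERIOD_MAX 0x10000         /* one wrap of the 16-bit counter */

static unsigned long demo_min_prescale(unsigned long clk_rate, u64 period_ns,
                                       unsigned long *cycles)
{
        u64 c = clk_rate;

        c *= period_ns;
        do_div(c, NSEC_PER_SEC);        /* requested period, in clock ticks */
        *cycles = c;

        /* smallest divider that fits the count into a single wrap */
        return DIV_ROUND_UP(*cycles, DEMO_PERIOD_MAX);
}

For example, clk_rate = 13 MHz and period_ns = NSEC_PER_SEC give 13,000,000 ticks and DIV_ROUND_UP(13000000, 0x10000) = 199 as the minimum prescale.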
b/drivers/remoteproc/pru_rproc.c index 327f0c7ee3d6b..7ffbae209a31f 100644 --- a/drivers/remoteproc/pru_rproc.c +++ b/drivers/remoteproc/pru_rproc.c @@ -340,7 +340,7 @@ EXPORT_SYMBOL_GPL(pru_rproc_put); */ int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr) { - struct pru_rproc *pru = rproc->priv; + struct pru_rproc *pru; unsigned int reg; u32 mask, set; u16 idx; @@ -352,6 +352,7 @@ int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr) if (!rproc->dev.parent || !is_pru_rproc(rproc->dev.parent)) return -ENODEV; + pru = rproc->priv; /* pointer is 16-bit and index is 8-bit so mask out the rest */ idx_mask = (c >= PRU_C28) ? 0xFFFF : 0xFF; diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c index 4ee5e67a9f03f..769c6d6d6a731 100644 --- a/drivers/remoteproc/qcom_q6v5.c +++ b/drivers/remoteproc/qcom_q6v5.c @@ -156,9 +156,6 @@ int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout) int ret; ret = wait_for_completion_timeout(&q6v5->start_done, timeout); - if (!ret) - disable_irq(q6v5->handover_irq); - return !ret ? -ETIMEDOUT : 0; } EXPORT_SYMBOL_GPL(qcom_q6v5_wait_for_start); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index aaf76406cd7d7..39db12f267cc6 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -443,6 +443,29 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) else err = rtc->ops->set_alarm(rtc->dev.parent, alarm); + /* + * Check for the potential race described above. If the alarm is set for + * the next second, and the second just ticked since the check above, either + * + * 1) It ticked after the alarm was set, and an alarm irq should be + * generated. + * + * 2) It ticked before the alarm was set, and an alarm irq most likely will + * not be generated. + * + * While we cannot easily check for which of these two scenarios we + * are in, we can return -ETIME to signal that the timer has already + * expired, which is true in both cases.
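One consequence of the rtc interface.c hunk above is worth noting: callers of __rtc_set_alarm() treat the new -ETIME as "already expired, fire now" rather than as a hard failure. A condensed sketch of that caller-side handling, modeled on the enqueue path in the same file (simplified, so details are illustrative):

#include <linux/errno.h>
#include <linux/rtc.h>
#include <linux/workqueue.h>

static int demo_enqueue_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        int err = __rtc_set_alarm(rtc, alarm);

        /*
         * -ETIME: the alarm time passed while it was being programmed;
         * no interrupt will arrive, so run the expiry work directly.
         */
        if (err == -ETIME) {
                schedule_work(&rtc->irqwork);
                err = 0;
        }

        return err;
}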
+ */ + if ((scheduled - now) <= 1) { + err = __rtc_read_time(rtc, &tm); + if (err) + return err; + now = rtc_tm_to_time64(&tm); + if (scheduled <= now) + return -ETIME; + } + trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err); return err; } @@ -594,6 +617,10 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) rtc->uie_rtctimer.node.expires = ktime_add(now, onesec); rtc->uie_rtctimer.period = ktime_set(1, 0); err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); + if (!err && rtc->ops && rtc->ops->alarm_irq_enable) + err = rtc->ops->alarm_irq_enable(rtc->dev.parent, 1); + if (err) + goto out; } else { rtc_timer_remove(rtc, &rtc->uie_rtctimer); } diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c index 9f8b5d4a8f6b6..6b77c122fdc10 100644 --- a/drivers/rtc/rtc-optee.c +++ b/drivers/rtc/rtc-optee.c @@ -320,6 +320,7 @@ static int optee_rtc_remove(struct device *dev) { struct optee_rtc *priv = dev_get_drvdata(dev); + tee_shm_free(priv->shm); tee_client_close_session(priv->ctx, priv->session_id); tee_client_close_context(priv->ctx); diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c index 4bcd7ca32f27b..b8a0fccef14e0 100644 --- a/drivers/rtc/rtc-x1205.c +++ b/drivers/rtc/rtc-x1205.c @@ -669,7 +669,7 @@ static const struct i2c_device_id x1205_id[] = { MODULE_DEVICE_TABLE(i2c, x1205_id); static const struct of_device_id x1205_dt_ids[] = { - { .compatible = "xircom,x1205", }, + { .compatible = "xicor,x1205", }, {}, }; MODULE_DEVICE_TABLE(of, x1205_dt_ids); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 42a4a996defbe..93f346d313e98 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -332,6 +332,11 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) lim.max_dev_sectors = device->discipline->max_sectors(block); lim.max_hw_sectors = lim.max_dev_sectors; lim.logical_block_size = block->bp_block; + /* + * Adjust dma_alignment to match block_size - 1 + * to ensure proper buffer alignment checks in the block layer. 
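A note on the dasd hunk here: queue_limits.dma_alignment is a bit mask, not a byte count, which is why block_size - 1 is the right value; the block layer accepts a user buffer only if the masked low bits of its address are zero. A tiny sketch of that check:

static bool demo_buffer_aligned(unsigned long uaddr, unsigned int block_size)
{
        unsigned long mask = block_size - 1;    /* e.g. 4096 -> 0xfff */

        /* aligned iff no low bit covered by the mask is set */
        return (uaddr & mask) == 0;
}

With a 4096-byte block size, a buffer at 0x10000 passes while one at 0x10200 is rejected.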
+ */ + lim.dma_alignment = lim.logical_block_size - 1; if (device->discipline->has_discard) { unsigned int max_bytes; @@ -3112,12 +3117,14 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, PTR_ERR(cqr) == -ENOMEM || PTR_ERR(cqr) == -EAGAIN) { rc = BLK_STS_RESOURCE; - goto out; + } else if (PTR_ERR(cqr) == -EINVAL) { + rc = BLK_STS_INVAL; + } else { + DBF_DEV_EVENT(DBF_ERR, basedev, + "CCW creation failed (rc=%ld) on request %p", + PTR_ERR(cqr), req); + rc = BLK_STS_IOERR; } - DBF_DEV_EVENT(DBF_ERR, basedev, - "CCW creation failed (rc=%ld) on request %p", - PTR_ERR(cqr), req); - rc = BLK_STS_IOERR; goto out; } /* diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 9498825d9c7a5..858a9e171351b 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1318,23 +1318,34 @@ void ccw_device_schedule_recovery(void) spin_unlock_irqrestore(&recovery_lock, flags); } -static int purge_fn(struct device *dev, void *data) +static int purge_fn(struct subchannel *sch, void *data) { - struct ccw_device *cdev = to_ccwdev(dev); - struct ccw_dev_id *id = &cdev->private->dev_id; - struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_device *cdev; - spin_lock_irq(cdev->ccwlock); - if (is_blacklisted(id->ssid, id->devno) && - (cdev->private->state == DEV_STATE_OFFLINE) && - (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) { - CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, - id->devno); + spin_lock_irq(&sch->lock); + if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv) + goto unlock; + + if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) + goto unlock; + + cdev = sch_get_cdev(sch); + if (cdev) { + if (cdev->private->state != DEV_STATE_OFFLINE) + goto unlock; + + if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) + goto unlock; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); - css_sched_sch_todo(sch, SCH_TODO_UNREG); atomic_set(&cdev->private->onoff, 0); } - spin_unlock_irq(cdev->ccwlock); + + css_sched_sch_todo(sch, SCH_TODO_UNREG); + CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid, + sch->schib.pmcw.dev, cdev ? "" : " (no cdev)"); + +unlock: + spin_unlock_irq(&sch->lock); /* Abort loop in case of pending signal. */ if (signal_pending(current)) return -EINTR; @@ -1350,7 +1361,7 @@ static int purge_fn(struct device *dev, void *data) int ccw_purge_blacklisted(void) { CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n"); - bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn); + for_each_subchannel_staged(purge_fn, NULL, NULL); return 0; } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 0c49414c1f350..6cb6586f4790b 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6528,18 +6528,21 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, while (left) { sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; buff_size[sg_used] = sz; - buff[sg_used] = kmalloc(sz, GFP_KERNEL); - if (buff[sg_used] == NULL) { - status = -ENOMEM; - goto cleanup1; - } + if (ioc->Request.Type.Direction & XFER_WRITE) { - if (copy_from_user(buff[sg_used], data_ptr, sz)) { - status = -EFAULT; + buff[sg_used] = memdup_user(data_ptr, sz); + if (IS_ERR(buff[sg_used])) { + status = PTR_ERR(buff[sg_used]); goto cleanup1; } - } else - memset(buff[sg_used], 0, sz); + } else { + buff[sg_used] = kzalloc(sz, GFP_KERNEL); + if (!buff[sg_used]) { + status = -ENOMEM; + goto cleanup1; + } + } + left -= sz; data_ptr += sz; sg_used++; diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index d84413b77d849..421db8996927b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -987,11 +987,9 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, list_for_each_entry_safe(mpt3sas_phy, next_phy, &mpt3sas_port->phy_list, port_siblings) { if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) - dev_printk(KERN_INFO, &mpt3sas_port->port->dev, - "remove: sas_addr(0x%016llx), phy(%d)\n", - (unsigned long long) - mpt3sas_port->remote_identify.sas_address, - mpt3sas_phy->phy_id); + ioc_info(ioc, "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) mpt3sas_port->remote_identify.sas_address, + mpt3sas_phy->phy_id); mpt3sas_phy->phy_belongs_to_port = 0; if (!ioc->remove_host) sas_port_delete_phy(mpt3sas_port->port, diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 020037cbf0d91..655cca5fe7ccb 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -124,7 +124,7 @@ static void mvs_free(struct mvs_info *mvi) if (mvi->shost) scsi_host_put(mvi->shost); list_for_each_entry(mwq, &mvi->wq_list, entry) - cancel_delayed_work(&mwq->work_q); + cancel_delayed_work_sync(&mwq->work_q); kfree(mvi->rsvd_tags); kfree(mvi); } diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index 1469d0c54e455..5a02fd3bc6c9e 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -498,14 +498,14 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, /* Temporary dma mapping, used only in the scope of this function */ mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox), &mbox_addr, GFP_KERNEL); - if (dma_mapping_error(&pdev->dev, mbox_addr)) + if (!mbox) return false; /* These are the base addresses for the command memory mailbox array */ cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox); cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size, &cs->cmd_mbox_addr, GFP_KERNEL); - if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) { + if (!cmd_mbox) { dev_err(&pdev->dev, "Failed to map command mailbox\n"); goto out_free; } @@ -520,7 +520,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox); stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size, &cs->stat_mbox_addr, GFP_KERNEL); - if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) { + if (!stat_mbox) { dev_err(&pdev->dev, "Failed to map status mailbox\n"); goto out_free; } @@ -533,7 +533,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, cs->fwstat_buf = dma_alloc_coherent(&pdev->dev, sizeof(struct myrs_fwstat), &cs->fwstat_addr, GFP_KERNEL); - if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) { + if (!cs->fwstat_buf) { dev_err(&pdev->dev, "Failed to map firmware health buffer\n"); 
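/*
 * Note that dma_alloc_coherent() signals failure by returning NULL;
 * dma_mapping_error() is only defined for handles returned by the
 * streaming dma_map_*() API, which is why the checks above test the
 * returned pointers instead. Clear the cached pointer before unwinding:
 */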
cs->fwstat_buf = NULL; goto out_free; diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index a9d6dac413346..4daab8b6d6752 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -703,6 +703,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev) unsigned long flags = 0; struct pm8001_hba_info *pm8001_ha; struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct domain_device *parent_dev = dev->parent; pm8001_ha = pm8001_find_ha_by_dev(dev); spin_lock_irqsave(&pm8001_ha->lock, flags); @@ -719,7 +720,13 @@ static void pm8001_dev_gone_notify(struct domain_device *dev) spin_lock_irqsave(&pm8001_ha->lock, flags); } PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); - pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0; + + /* + * The phy array only contains local phys. Thus, we cannot clear + * phy_attached for a device behind an expander. + */ + if (!(parent_dev && dev_is_expander(parent_dev->dev_type))) + pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0; pm8001_free_dev(pm8001_dev); } else { pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n"); diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c index dcde55c8ee5de..be20e2c457b8e 100644 --- a/drivers/scsi/qla2xxx/qla_edif.c +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -1797,7 +1797,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job) switch (rval) { case QLA_SUCCESS: break; - case EAGAIN: + case -EAGAIN: msleep(EDIF_MSLEEP_INTERVAL); cnt++; if (cnt < EDIF_RETRY_COUNT) @@ -3648,7 +3648,7 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job) p->e.extra_rx_xchg_address, p->e.extra_control_flags, sp->handle, sp->remap.req.len, bsg_job); break; - case EAGAIN: + case -EAGAIN: msleep(EDIF_MSLEEP_INTERVAL); cnt++; if (cnt < EDIF_RETRY_COUNT) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 79cdfec2bca35..8bd4aa935e22b 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2059,11 +2059,11 @@ static void qla_marker_sp_done(srb_t *sp, int res) int cnt = 5; \ do { \ if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\ - _rval = EINVAL; \ + _rval = -EINVAL; \ break; \ } \ _rval = qla2x00_start_sp(_sp); \ - if (_rval == EAGAIN) \ + if (_rval == -EAGAIN) \ msleep(1); \ else \ break; \ diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 8ee2e337c9e1b..316594aa40cc5 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -419,7 +419,7 @@ static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport, switch (rval) { case QLA_SUCCESS: break; - case EAGAIN: + case -EAGAIN: msleep(PURLS_MSLEEP_INTERVAL); cnt++; if (cnt < PURLS_RETRY_COUNT) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index ee1d5dec3bc60..3745cf8569171 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3695,10 +3695,10 @@ static int sd_revalidate_disk(struct gendisk *disk) struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; sector_t old_capacity = sdkp->capacity; - struct queue_limits lim; - unsigned char *buffer; + struct queue_limits *lim = NULL; + unsigned char *buffer = NULL; unsigned int dev_max; - int err; + int err = 0; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_revalidate_disk\n")); @@ -3710,6 +3710,10 @@ static int sd_revalidate_disk(struct gendisk *disk) if (!scsi_device_online(sdp)) goto out; + lim = kmalloc(sizeof(*lim), GFP_KERNEL); + if 
(!lim) + goto out; + buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL); if (!buffer) { sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " @@ -3719,14 +3723,14 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_spinup_disk(sdkp); - lim = queue_limits_start_update(sdkp->disk->queue); + *lim = queue_limits_start_update(sdkp->disk->queue); /* * Without media there is no reason to ask; moreover, some devices * react badly if we do. */ if (sdkp->media_present) { - sd_read_capacity(sdkp, &lim, buffer); + sd_read_capacity(sdkp, lim, buffer); /* * Some USB/UAS devices return generic values for mode pages * until the media has been accessed. Trigger a READ operation @@ -3740,17 +3744,17 @@ static int sd_revalidate_disk(struct gendisk *disk) * cause this to be updated correctly and any device which * doesn't support it should be treated as rotational. */ - lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); + lim->features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); - sd_read_block_limits(sdkp, &lim); + sd_read_block_limits(sdkp, lim); sd_read_block_limits_ext(sdkp); - sd_read_block_characteristics(sdkp, &lim); - sd_zbc_read_zones(sdkp, &lim, buffer); + sd_read_block_characteristics(sdkp, lim); + sd_zbc_read_zones(sdkp, lim, buffer); } - sd_config_discard(sdkp, &lim, sd_discard_mode(sdkp)); + sd_config_discard(sdkp, lim, sd_discard_mode(sdkp)); sd_print_capacity(sdkp, old_capacity); @@ -3760,47 +3764,46 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_app_tag_own(sdkp, buffer); sd_read_write_same(sdkp, buffer); sd_read_security(sdkp, buffer); - sd_config_protection(sdkp, &lim); + sd_config_protection(sdkp, lim); } /* * We now have all cache related info, determine how we deal * with flush requests. */ - sd_set_flush_flag(sdkp, &lim); + sd_set_flush_flag(sdkp, lim); /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; /* Some devices report a maximum block count for READ/WRITE requests. */ dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); - lim.max_dev_sectors = logical_to_sectors(sdp, dev_max); + lim->max_dev_sectors = logical_to_sectors(sdp, dev_max); if (sd_validate_min_xfer_size(sdkp)) - lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks); + lim->io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks); else - lim.io_min = 0; + lim->io_min = 0; /* * Limit default to SCSI host optimal sector limit if set. There may be * an impact on performance when the size of a request exceeds this * host limit.
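* For example, a host advertising opt_sectors = 1024 yields a default
 * io_opt of 512 KiB (1024 << SECTOR_SHIFT).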
*/ - lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT; + lim->io_opt = sdp->host->opt_sectors << SECTOR_SHIFT; if (sd_validate_opt_xfer_size(sdkp, dev_max)) { - lim.io_opt = min_not_zero(lim.io_opt, + lim->io_opt = min_not_zero(lim->io_opt, logical_to_bytes(sdp, sdkp->opt_xfer_blocks)); } sdkp->first_scan = 0; set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity)); - sd_config_write_same(sdkp, &lim); - kfree(buffer); + sd_config_write_same(sdkp, lim); - err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim); + err = queue_limits_commit_update_frozen(sdkp->disk->queue, lim); if (err) - return err; + goto out; /* * Query concurrent positioning ranges after @@ -3819,7 +3822,10 @@ static int sd_revalidate_disk(struct gendisk *disk) set_capacity_and_notify(disk, 0); out: - return 0; + kfree(buffer); + kfree(lim); + + return err; } /** diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c index 9a91298c12539..4cb8169aec6b5 100644 --- a/drivers/soc/mediatek/mtk-svs.c +++ b/drivers/soc/mediatek/mtk-svs.c @@ -2167,10 +2167,18 @@ static struct device *svs_add_device_link(struct svs_platform *svsp, return dev; } +static void svs_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + static int svs_mt8192_platform_probe(struct svs_platform *svsp) { struct device *dev; u32 idx; + int ret; svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst"); if (IS_ERR(svsp->rst)) @@ -2181,6 +2189,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp) if (IS_ERR(dev)) return dev_err_probe(svsp->dev, PTR_ERR(dev), "failed to get lvts device\n"); + put_device(dev); for (idx = 0; idx < svsp->bank_max; idx++) { struct svs_bank *svsb = &svsp->banks[idx]; @@ -2190,6 +2199,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp) case SVSB_SWID_CPU_LITTLE: case SVSB_SWID_CPU_BIG: svsb->opp_dev = get_cpu_device(bdata->cpu_id); + get_device(svsb->opp_dev); break; case SVSB_SWID_CCI: svsb->opp_dev = svs_add_device_link(svsp, "cci"); @@ -2209,6 +2219,11 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp) return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), "failed to get OPP device for bank %d\n", idx); + + ret = devm_add_action_or_reset(svsp->dev, svs_put_device, + svsb->opp_dev); + if (ret) + return ret; } return 0; @@ -2218,11 +2233,13 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp) { struct device *dev; u32 idx; + int ret; dev = svs_add_device_link(svsp, "thermal-sensor"); if (IS_ERR(dev)) return dev_err_probe(svsp->dev, PTR_ERR(dev), "failed to get thermal device\n"); + put_device(dev); for (idx = 0; idx < svsp->bank_max; idx++) { struct svs_bank *svsb = &svsp->banks[idx]; @@ -2232,6 +2249,7 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp) case SVSB_SWID_CPU_LITTLE: case SVSB_SWID_CPU_BIG: svsb->opp_dev = get_cpu_device(bdata->cpu_id); + get_device(svsb->opp_dev); break; case SVSB_SWID_CCI: svsb->opp_dev = svs_add_device_link(svsp, "cci"); @@ -2248,6 +2266,11 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp) return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), "failed to get OPP device for bank %d\n", idx); + + ret = devm_add_action_or_reset(svsp->dev, svs_put_device, + svsb->opp_dev); + if (ret) + return ret; } return 0; diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index 641f29a98cbd2..cc72a31a450e4 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -453,13 +453,10 @@ static 
irqreturn_t tcs_tx_done(int irq, void *p) trace_rpmh_tx_done(drv, i, req); - /* - * If wake tcs was re-purposed for sending active - * votes, clear AMC trigger & enable modes and + /* Clear AMC trigger & enable modes and * disable interrupt for this TCS */ - if (!drv->tcs[ACTIVE_TCS].num_tcs) - __tcs_set_trigger(drv, i, false); + __tcs_set_trigger(drv, i, false); skip: /* Reclaim the TCS */ write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0); diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c index 1369691a997bf..b78163eaed61d 100644 --- a/drivers/spi/spi-airoha-snfi.c +++ b/drivers/spi/spi-airoha-snfi.c @@ -192,6 +192,14 @@ #define SPI_NAND_OP_RESET 0xff #define SPI_NAND_OP_DIE_SELECT 0xc2 +/* SNAND FIFO commands */ +#define SNAND_FIFO_TX_BUSWIDTH_SINGLE 0x08 +#define SNAND_FIFO_TX_BUSWIDTH_DUAL 0x09 +#define SNAND_FIFO_TX_BUSWIDTH_QUAD 0x0a +#define SNAND_FIFO_RX_BUSWIDTH_SINGLE 0x0c +#define SNAND_FIFO_RX_BUSWIDTH_DUAL 0x0e +#define SNAND_FIFO_RX_BUSWIDTH_QUAD 0x0f + #define SPI_NAND_CACHE_SIZE (SZ_4K + SZ_256) #define SPI_MAX_TRANSFER_SIZE 511 @@ -206,13 +214,6 @@ enum airoha_snand_cs { SPI_CHIP_SEL_LOW, }; -struct airoha_snand_dev { - size_t buf_len; - - u8 *txrx_buf; - dma_addr_t dma_addr; -}; - struct airoha_snand_ctrl { struct device *dev; struct regmap *regmap_ctrl; @@ -394,10 +395,26 @@ static int airoha_snand_set_mode(struct airoha_snand_ctrl *as_ctrl, return regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_DUMMY, 0); } -static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd, - const u8 *data, int len) +static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, + const u8 *data, int len, int buswidth) { int i, data_len; + u8 cmd; + + switch (buswidth) { + case 0: + case 1: + cmd = SNAND_FIFO_TX_BUSWIDTH_SINGLE; + break; + case 2: + cmd = SNAND_FIFO_TX_BUSWIDTH_DUAL; + break; + case 4: + cmd = SNAND_FIFO_TX_BUSWIDTH_QUAD; + break; + default: + return -EINVAL; + } for (i = 0; i < len; i += data_len) { int err; @@ -416,16 +433,32 @@ static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd, return 0; } -static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data, - int len) +static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, + u8 *data, int len, int buswidth) { int i, data_len; + u8 cmd; + + switch (buswidth) { + case 0: + case 1: + cmd = SNAND_FIFO_RX_BUSWIDTH_SINGLE; + break; + case 2: + cmd = SNAND_FIFO_RX_BUSWIDTH_DUAL; + break; + case 4: + cmd = SNAND_FIFO_RX_BUSWIDTH_QUAD; + break; + default: + return -EINVAL; + } for (i = 0; i < len; i += data_len) { int err; data_len = min(len - i, SPI_MAX_TRANSFER_SIZE); - err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len); + err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len); if (err) return err; @@ -617,14 +650,18 @@ static bool airoha_snand_supports_op(struct spi_mem *mem, static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc) { - struct airoha_snand_dev *as_dev = spi_get_ctldata(desc->mem->spi); + u8 *txrx_buf = spi_get_ctldata(desc->mem->spi); - if (!as_dev->txrx_buf) + if (!txrx_buf) return -EINVAL; if (desc->info.offset + desc->info.length > U32_MAX) return -EINVAL; + /* continuous reading is not supported */ + if (desc->info.length > SPI_NAND_CACHE_SIZE) + return -E2BIG; + if (!airoha_snand_supports_op(desc->mem, &desc->info.op_tmpl)) return -EOPNOTSUPP; @@ -634,10 +671,11 @@ static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc) static ssize_t airoha_snand_dirmap_read(struct 
spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf) { - struct spi_device *spi = desc->mem->spi; - struct airoha_snand_dev *as_dev = spi_get_ctldata(spi); struct spi_mem_op *op = &desc->info.op_tmpl; + struct spi_device *spi = desc->mem->spi; struct airoha_snand_ctrl *as_ctrl; + u8 *txrx_buf = spi_get_ctldata(spi); + dma_addr_t dma_addr; u32 val, rd_mode; int err; @@ -660,16 +698,19 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, err = airoha_snand_nfi_config(as_ctrl); if (err) - return err; + goto error_dma_mode_off; - dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr, - as_dev->buf_len, DMA_BIDIRECTIONAL); + dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE, + DMA_FROM_DEVICE); + err = dma_mapping_error(as_ctrl->dev, dma_addr); + if (err) + goto error_dma_mode_off; /* set dma addr */ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR, - as_dev->dma_addr); + dma_addr); if (err) - return err; + goto error_dma_unmap; /* set cust sec size */ val = as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num; @@ -678,58 +719,59 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, REG_SPI_NFI_SNF_MISC_CTL2, SPI_NFI_READ_DATA_BYTE_NUM, val); if (err) - return err; + goto error_dma_unmap; /* set read command */ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL2, op->cmd.opcode); if (err) - return err; + goto error_dma_unmap; /* set read mode */ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL, FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, rd_mode)); if (err) - return err; + goto error_dma_unmap; - /* set read addr */ - err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0); + /* set read addr: zero page offset + descriptor read offset */ + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, + desc->info.offset); if (err) - return err; + goto error_dma_unmap; /* set nfi read */ err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, SPI_NFI_OPMODE, FIELD_PREP(SPI_NFI_OPMODE, 6)); if (err) - return err; + goto error_dma_unmap; err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, SPI_NFI_READ_MODE | SPI_NFI_DMA_MODE); if (err) - return err; + goto error_dma_unmap; err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x0); if (err) - return err; + goto error_dma_unmap; /* trigger dma start read */ err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, SPI_NFI_RD_TRIG); if (err) - return err; + goto error_dma_unmap; err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, SPI_NFI_RD_TRIG); if (err) - return err; + goto error_dma_unmap; err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, val, (val & SPI_NFI_READ_FROM_CACHE_DONE), 0, 1 * USEC_PER_SEC); if (err) - return err; + goto error_dma_unmap; /* * SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end @@ -739,35 +781,43 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, SPI_NFI_READ_FROM_CACHE_DONE, SPI_NFI_READ_FROM_CACHE_DONE); if (err) - return err; + goto error_dma_unmap; err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR, val, (val & SPI_NFI_AHB_DONE), 0, 1 * USEC_PER_SEC); if (err) - return err; + goto error_dma_unmap; /* DMA read need delay for data ready from controller to DRAM */ udelay(1); - dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr, - as_dev->buf_len, DMA_BIDIRECTIONAL); + dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE, + DMA_FROM_DEVICE); err = airoha_snand_set_mode(as_ctrl, 
SPI_MODE_MANUAL); if (err < 0) return err; - memcpy(buf, as_dev->txrx_buf + offs, len); + memcpy(buf, txrx_buf + offs, len); return len; + +error_dma_unmap: + dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE, + DMA_FROM_DEVICE); +error_dma_mode_off: + airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); + return err; } static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, const void *buf) { - struct spi_device *spi = desc->mem->spi; - struct airoha_snand_dev *as_dev = spi_get_ctldata(spi); struct spi_mem_op *op = &desc->info.op_tmpl; + struct spi_device *spi = desc->mem->spi; + u8 *txrx_buf = spi_get_ctldata(spi); struct airoha_snand_ctrl *as_ctrl; + dma_addr_t dma_addr; u32 wr_mode, val; int err; @@ -776,19 +826,20 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, if (err < 0) return err; - dma_sync_single_for_cpu(as_ctrl->dev, as_dev->dma_addr, - as_dev->buf_len, DMA_BIDIRECTIONAL); - memcpy(as_dev->txrx_buf + offs, buf, len); - dma_sync_single_for_device(as_ctrl->dev, as_dev->dma_addr, - as_dev->buf_len, DMA_BIDIRECTIONAL); + memcpy(txrx_buf + offs, buf, len); + dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE, + DMA_TO_DEVICE); + err = dma_mapping_error(as_ctrl->dev, dma_addr); + if (err) + return err; err = airoha_snand_set_mode(as_ctrl, SPI_MODE_DMA); if (err < 0) - return err; + goto error_dma_unmap; err = airoha_snand_nfi_config(as_ctrl); if (err) - return err; + goto error_dma_unmap; if (op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_QUAD || op->cmd.opcode == SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD) @@ -797,9 +848,9 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, wr_mode = 0; err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR, - as_dev->dma_addr); + dma_addr); if (err) - return err; + goto error_dma_unmap; val = FIELD_PREP(SPI_NFI_PROG_LOAD_BYTE_NUM, as_ctrl->nfi_cfg.sec_size * as_ctrl->nfi_cfg.sec_num); @@ -807,65 +858,67 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, REG_SPI_NFI_SNF_MISC_CTL2, SPI_NFI_PROG_LOAD_BYTE_NUM, val); if (err) - return err; + goto error_dma_unmap; err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL1, FIELD_PREP(SPI_NFI_PG_LOAD_CMD, op->cmd.opcode)); if (err) - return err; + goto error_dma_unmap; err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL, FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, wr_mode)); if (err) - return err; + goto error_dma_unmap; - err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0); + /* set write addr: zero page offset + descriptor write offset */ + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, + desc->info.offset); if (err) - return err; + goto error_dma_unmap; err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, SPI_NFI_READ_MODE); if (err) - return err; + goto error_dma_unmap; err = regmap_update_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, SPI_NFI_OPMODE, FIELD_PREP(SPI_NFI_OPMODE, 3)); if (err) - return err; + goto error_dma_unmap; err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CNFG, SPI_NFI_DMA_MODE); if (err) - return err; + goto error_dma_unmap; err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_CMD, 0x80); if (err) - return err; + goto error_dma_unmap; err = regmap_clear_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, SPI_NFI_WR_TRIG); if (err) - return err; + goto error_dma_unmap; err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_CON, SPI_NFI_WR_TRIG); if (err) - return err; + goto error_dma_unmap; err = 
regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_INTR, val, (val & SPI_NFI_AHB_DONE), 0, 1 * USEC_PER_SEC); if (err) - return err; + goto error_dma_unmap; err = regmap_read_poll_timeout(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, val, (val & SPI_NFI_LOAD_TO_CACHE_DONE), 0, 1 * USEC_PER_SEC); if (err) - return err; + goto error_dma_unmap; /* * SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end @@ -875,24 +928,48 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, SPI_NFI_LOAD_TO_CACHE_DONE, SPI_NFI_LOAD_TO_CACHE_DONE); if (err) - return err; + goto error_dma_unmap; + dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE, + DMA_TO_DEVICE); err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); if (err < 0) return err; return len; + +error_dma_unmap: + dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE, + DMA_TO_DEVICE); + airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); + return err; } static int airoha_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { - u8 data[8], cmd, opcode = op->cmd.opcode; struct airoha_snand_ctrl *as_ctrl; + int op_len, addr_len, dummy_len; + u8 buf[20], *data; int i, err; as_ctrl = spi_controller_get_devdata(mem->spi->controller); + op_len = op->cmd.nbytes; + addr_len = op->addr.nbytes; + dummy_len = op->dummy.nbytes; + + if (op_len + dummy_len + addr_len > sizeof(buf)) + return -EIO; + + data = buf; + for (i = 0; i < op_len; i++) + *data++ = op->cmd.opcode >> (8 * (op_len - i - 1)); + for (i = 0; i < addr_len; i++) + *data++ = op->addr.val >> (8 * (addr_len - i - 1)); + for (i = 0; i < dummy_len; i++) + *data++ = 0xff; + /* switch to manual mode */ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); if (err < 0) @@ -903,40 +980,40 @@ static int airoha_snand_exec_op(struct spi_mem *mem, return err; /* opcode */ - err = airoha_snand_write_data(as_ctrl, 0x8, &opcode, sizeof(opcode)); + data = buf; + err = airoha_snand_write_data(as_ctrl, data, op_len, + op->cmd.buswidth); if (err) return err; /* addr part */ - cmd = opcode == SPI_NAND_OP_GET_FEATURE ? 
0x11 : 0x8; - put_unaligned_be64(op->addr.val, data); - - for (i = ARRAY_SIZE(data) - op->addr.nbytes; - i < ARRAY_SIZE(data); i++) { - err = airoha_snand_write_data(as_ctrl, cmd, &data[i], - sizeof(data[0])); + data += op_len; + if (addr_len) { + err = airoha_snand_write_data(as_ctrl, data, addr_len, + op->addr.buswidth); if (err) return err; } /* dummy */ - data[0] = 0xff; - for (i = 0; i < op->dummy.nbytes; i++) { - err = airoha_snand_write_data(as_ctrl, 0x8, &data[0], - sizeof(data[0])); + data += addr_len; + if (dummy_len) { + err = airoha_snand_write_data(as_ctrl, data, dummy_len, + op->dummy.buswidth); if (err) return err; } /* data */ - if (op->data.dir == SPI_MEM_DATA_IN) { - err = airoha_snand_read_data(as_ctrl, op->data.buf.in, - op->data.nbytes); - if (err) - return err; - } else { - err = airoha_snand_write_data(as_ctrl, 0x8, op->data.buf.out, - op->data.nbytes); + if (op->data.nbytes) { + if (op->data.dir == SPI_MEM_DATA_IN) + err = airoha_snand_read_data(as_ctrl, op->data.buf.in, + op->data.nbytes, + op->data.buswidth); + else + err = airoha_snand_write_data(as_ctrl, op->data.buf.out, + op->data.nbytes, + op->data.buswidth); if (err) return err; } @@ -956,42 +1033,20 @@ static const struct spi_controller_mem_ops airoha_snand_mem_ops = { static int airoha_snand_setup(struct spi_device *spi) { struct airoha_snand_ctrl *as_ctrl; - struct airoha_snand_dev *as_dev; - - as_ctrl = spi_controller_get_devdata(spi->controller); - - as_dev = devm_kzalloc(as_ctrl->dev, sizeof(*as_dev), GFP_KERNEL); - if (!as_dev) - return -ENOMEM; + u8 *txrx_buf; /* prepare device buffer */ - as_dev->buf_len = SPI_NAND_CACHE_SIZE; - as_dev->txrx_buf = devm_kzalloc(as_ctrl->dev, as_dev->buf_len, - GFP_KERNEL); - if (!as_dev->txrx_buf) - return -ENOMEM; - - as_dev->dma_addr = dma_map_single(as_ctrl->dev, as_dev->txrx_buf, - as_dev->buf_len, DMA_BIDIRECTIONAL); - if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr)) + as_ctrl = spi_controller_get_devdata(spi->controller); + txrx_buf = devm_kzalloc(as_ctrl->dev, SPI_NAND_CACHE_SIZE, + GFP_KERNEL); + if (!txrx_buf) return -ENOMEM; - spi_set_ctldata(spi, as_dev); + spi_set_ctldata(spi, txrx_buf); return 0; } -static void airoha_snand_cleanup(struct spi_device *spi) -{ - struct airoha_snand_dev *as_dev = spi_get_ctldata(spi); - struct airoha_snand_ctrl *as_ctrl; - - as_ctrl = spi_controller_get_devdata(spi->controller); - dma_unmap_single(as_ctrl->dev, as_dev->dma_addr, - as_dev->buf_len, DMA_BIDIRECTIONAL); - spi_set_ctldata(spi, NULL); -} - static int airoha_snand_nfi_setup(struct airoha_snand_ctrl *as_ctrl) { u32 val, sec_size, sec_num; @@ -1093,7 +1148,6 @@ static int airoha_snand_probe(struct platform_device *pdev) ctrl->bits_per_word_mask = SPI_BPW_MASK(8); ctrl->mode_bits = SPI_RX_DUAL; ctrl->setup = airoha_snand_setup; - ctrl->cleanup = airoha_snand_cleanup; device_set_node(&ctrl->dev, dev_fwnode(dev)); err = airoha_snand_nfi_setup(as_ctrl); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 56be0b6901a87..06e43b184d85d 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -712,6 +712,7 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata, reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; reg |= (op->addr.nbytes - 1); writel(reg, reg_base + CQSPI_REG_SIZE); + readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. 
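Reading the same register back forces the preceding writel() through any write-posting buffers before the indirect operation is started.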
*/ return 0; } @@ -754,6 +755,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, reinit_completion(&cqspi->transfer_complete); writel(CQSPI_REG_INDIRECTRD_START_MASK, reg_base + CQSPI_REG_INDIRECTRD); + readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */ while (remaining > 0) { if (use_irq && @@ -1033,6 +1035,7 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata, reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; reg |= (op->addr.nbytes - 1); writel(reg, reg_base + CQSPI_REG_SIZE); + readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */ return 0; } @@ -1058,6 +1061,8 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata, reinit_completion(&cqspi->transfer_complete); writel(CQSPI_REG_INDIRECTWR_START_MASK, reg_base + CQSPI_REG_INDIRECTWR); + readl(reg_base + CQSPI_REG_INDIRECTWR); /* Flush posted write. */ + /* * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access * Controller programming sequence, couple of cycles of @@ -1668,12 +1673,10 @@ static const struct spi_controller_mem_caps cqspi_mem_caps = { static int cqspi_setup_flash(struct cqspi_st *cqspi) { - unsigned int max_cs = cqspi->num_chipselect - 1; struct platform_device *pdev = cqspi->pdev; struct device *dev = &pdev->dev; struct cqspi_flash_pdata *f_pdata; - unsigned int cs; - int ret; + int ret, cs, max_cs = -1; /* Get flash device data */ for_each_available_child_of_node_scoped(dev->of_node, np) { @@ -1686,10 +1689,10 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi) if (cs >= cqspi->num_chipselect) { dev_err(dev, "Chip select %d out of range.\n", cs); return -EINVAL; - } else if (cs < max_cs) { - max_cs = cs; } + max_cs = max_t(int, cs, max_cs); + f_pdata = &cqspi->f_pdata[cs]; f_pdata->cqspi = cqspi; f_pdata->cs = cs; @@ -1699,6 +1702,11 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi) return ret; } + if (max_cs < 0) { + dev_err(dev, "No flash device declared\n"); + return -ENODEV; + } + cqspi->num_chipselect = max_cs + 1; return 0; } diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index 5a1e55a01c521..b569302f22e61 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -665,6 +665,12 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f) 0, POLL_TOUT, true); if (ret) dev_warn(f->dev, "DLL lock failed, please fix it!\n"); + + /* + * For ERR050272, DLL lock status bit is not accurate, + * wait for 4us more as a workaround. 
+ */ + udelay(4); } /* diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8d6341b0d8668..5ad9f4a2148fa 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2462,7 +2462,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, if (rc > ctlr->num_chipselect) { dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n", nc, rc); - return rc; + return -EINVAL; } if ((of_property_read_bool(nc, "parallel-memories")) && (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) { diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c index 6769f066b0b4e..028ed5d0ecbe7 100644 --- a/drivers/staging/axis-fifo/axis-fifo.c +++ b/drivers/staging/axis-fifo/axis-fifo.c @@ -42,7 +42,6 @@ #define DRIVER_NAME "axis_fifo" #define READ_BUF_SIZE 128U /* read buffer length in words */ -#define WRITE_BUF_SIZE 128U /* write buffer length in words */ /* ---------------------------- * IP register offsets @@ -392,6 +391,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, } bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET); + words_available = bytes_available / sizeof(u32); if (!bytes_available) { dev_err(fifo->dt_device, "received a packet of length 0\n"); ret = -EIO; @@ -402,7 +402,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n", bytes_available, len); ret = -EINVAL; - goto end_unlock; + goto err_flush_rx; } if (bytes_available % sizeof(u32)) { /* this probably can't happen unless IP * registers were previously mishandled */ dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n"); ret = -EIO; - goto end_unlock; + goto err_flush_rx; } - words_available = bytes_available / sizeof(u32); - /* read data into an intermediate buffer, copying the contents * to userspace when the buffer is full */ @@ -427,18 +425,23 @@ tmp_buf[i] = ioread32(fifo->base_addr + XLLF_RDFD_OFFSET); } + words_available -= copy; if (copy_to_user(buf + copied * sizeof(u32), tmp_buf, copy * sizeof(u32))) { ret = -EFAULT; - goto end_unlock; + goto err_flush_rx; } copied += copy; - words_available -= copy; } + mutex_unlock(&fifo->read_lock); + + return bytes_available; - ret = bytes_available; +err_flush_rx: + while (words_available--) + ioread32(fifo->base_addr + XLLF_RDFD_OFFSET); end_unlock: mutex_unlock(&fifo->read_lock); @@ -466,11 +469,8 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf, { struct axis_fifo *fifo = (struct axis_fifo *)f->private_data; unsigned int words_to_write; - unsigned int copied; - unsigned int copy; - unsigned int i; + u32 *txbuf; int ret; - u32 tmp_buf[WRITE_BUF_SIZE]; if (len % sizeof(u32)) { dev_err(fifo->dt_device, @@ -486,11 +486,17 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf, return -EINVAL; } - if (words_to_write > fifo->tx_fifo_depth) { - dev_err(fifo->dt_device, "tried to write more words [%u] than slots in the fifo buffer [%u]\n", - words_to_write, fifo->tx_fifo_depth); + /* + * In 'Store-and-Forward' mode, the maximum packet that can be + * transmitted is limited by the size of the FIFO, which is + * (C_TX_FIFO_DEPTH - 4)*(data interface width/8) bytes. + * + * Do not attempt to send a packet larger than 'tx_fifo_depth - 4', + * otherwise a 'Transmit Packet Overrun Error' interrupt will be + * raised, which requires a reset of the TX circuit to recover.
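+ * For example, with a 32-bit data interface and a 512-word TX FIFO,
+ * the largest packet is (512 - 4) * 4 = 2032 bytes.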
+ */ + if (words_to_write > (fifo->tx_fifo_depth - 4)) return -EINVAL; - } if (fifo->write_flags & O_NONBLOCK) { /* @@ -529,32 +535,20 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf, } } - /* write data from an intermediate buffer into the fifo IP, refilling - * the buffer with userspace data as needed - */ - copied = 0; - while (words_to_write > 0) { - copy = min(words_to_write, WRITE_BUF_SIZE); - - if (copy_from_user(tmp_buf, buf + copied * sizeof(u32), - copy * sizeof(u32))) { - ret = -EFAULT; - goto end_unlock; - } - - for (i = 0; i < copy; i++) - iowrite32(tmp_buf[i], fifo->base_addr + - XLLF_TDFD_OFFSET); - - copied += copy; - words_to_write -= copy; + txbuf = vmemdup_user(buf, len); + if (IS_ERR(txbuf)) { + ret = PTR_ERR(txbuf); + goto end_unlock; } - ret = copied * sizeof(u32); + for (int i = 0; i < words_to_write; ++i) + iowrite32(txbuf[i], fifo->base_addr + XLLF_TDFD_OFFSET); /* write packet size to fifo */ - iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET); + iowrite32(len, fifo->base_addr + XLLF_TLR_OFFSET); + ret = len; + kvfree(txbuf); end_unlock: mutex_unlock(&fifo->write_lock); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index c40217f44b1bc..3188bca17e1b9 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2776,7 +2776,7 @@ static ssize_t target_lu_gp_members_show(struct config_item *item, char *page) config_item_name(&dev->dev_group.cg_item)); cur_len++; /* Extra byte for NULL terminator */ - if ((cur_len + len) > PAGE_SIZE) { + if ((cur_len + len) > PAGE_SIZE || cur_len > LU_GROUP_NAME_BUF) { pr_warn("Ran out of lu_gp_show_attr" "_members buffer\n"); break; } diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 2a7d253d9c554..8e50476eb71fb 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -321,6 +321,14 @@ register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags, if (unlikely(len <= 0)) { ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM); goto err_free_shm_pages; + } else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) { + /* + * If we only got a few pages, update to release the + * correct amount below. + */ + shm->num_pages = len / PAGE_SIZE; + ret = ERR_PTR(-ENOMEM); + goto err_put_shm_pages; } /* diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig index 2c7f3f9a26ebb..a6bb01082ec69 100644 --- a/drivers/thermal/qcom/Kconfig +++ b/drivers/thermal/qcom/Kconfig @@ -34,7 +34,8 @@ config QCOM_SPMI_TEMP_ALARM config QCOM_LMH tristate "Qualcomm Limits Management Hardware" - depends on ARCH_QCOM && QCOM_SCM + depends on ARCH_QCOM || COMPILE_TEST + select QCOM_SCM help This enables initialization of Qualcomm limits management hardware (LMh).
LMh allows for hardware-enforced mitigation for cpus based on diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c index d2d49264cf83a..7c299184c59b1 100644 --- a/drivers/thermal/qcom/lmh.c +++ b/drivers/thermal/qcom/lmh.c @@ -5,6 +5,8 @@ */ #include #include +#include +#include #include #include #include diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 252849910588f..c917fc20b469a 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -461,6 +461,7 @@ static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg); static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr); static void gsmld_write_trigger(struct gsm_mux *gsm); static void gsmld_write_task(struct work_struct *work); +static int gsm_modem_send_initial_msc(struct gsm_dlci *dlci); /** * gsm_fcs_add - update FCS @@ -2174,7 +2175,7 @@ static void gsm_dlci_open(struct gsm_dlci *dlci) pr_debug("DLCI %d goes open.\n", dlci->addr); /* Send current modem state */ if (dlci->addr) { - gsm_modem_update(dlci, 0); + gsm_modem_send_initial_msc(dlci); } else { /* Start keep-alive control */ gsm->ka_num = 0; @@ -4156,6 +4157,28 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk) return gsm_control_wait(dlci->gsm, ctrl); } +/** + * gsm_modem_send_initial_msc - Send initial modem status message + * + * @dlci: channel + * + * Send an initial MSC message after DLCI open to set the initial + * modem status lines. This is only done for basic mode. + * Does not wait for a response as we cannot block the input queue + * processing. + */ +static int gsm_modem_send_initial_msc(struct gsm_dlci *dlci) +{ + u8 modembits[2]; + + if (dlci->adaption != 1 || dlci->gsm->encoding != GSM_BASIC_OPT) + return 0; + + modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */ + modembits[1] = (gsm_encode_modem(dlci) << 1) | EA; + return gsm_control_command(dlci->gsm, CMD_MSC, (const u8 *)&modembits, 2); +} + /** * gsm_modem_update - send modem status line state * @dlci: channel diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 51894c93c8a31..83d186c038cd9 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -620,7 +620,9 @@ static int dw8250_probe(struct platform_device *pdev) if (IS_ERR(data->rst)) return PTR_ERR(data->rst); - reset_control_deassert(data->rst); + err = reset_control_deassert(data->rst); + if (err) + return dev_err_probe(dev, err, "failed to deassert resets\n"); err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst); if (err) diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index b7a75db15249a..5ed6514ec88aa 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -39,6 +39,8 @@ #define PCI_DEVICE_ID_ACCESSIO_COM_4SM 0x10db #define PCI_DEVICE_ID_ACCESSIO_COM_8SM 0x10ea +#define PCI_DEVICE_ID_ADVANTECH_XR17V352 0x0018 + #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002 #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004 #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a @@ -1678,6 +1680,12 @@ static const struct exar8250_board pbn_fastcom35x_8 = { .exit = pci_xr17v35x_exit, }; +static const struct exar8250_board pbn_adv_XR17V352 = { + .num_ports = 2, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + static const struct exar8250_board pbn_exar_XR17V4358 = { .num_ports = 12, .setup = pci_xr17v35x_setup, @@ -1752,6 +1760,9 @@ static const struct pci_device_id exar_pci_tbl[] = { USR_DEVICE(XR17C152,
2980, pbn_exar_XR17C15x), USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x), + /* ADVANTECH devices */ + EXAR_DEVICE(ADVANTECH, XR17V352, pbn_adv_XR17V352), + /* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */ EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x), EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x), diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index b9cca210e171c..1c878986fcec0 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -435,6 +435,7 @@ static int __maybe_unused mtk8250_runtime_suspend(struct device *dev) while (serial_in(up, MTK_UART_DEBUG0)); + clk_disable_unprepare(data->uart_clk); clk_disable_unprepare(data->bus_clk); return 0; @@ -445,6 +446,7 @@ static int __maybe_unused mtk8250_runtime_resume(struct device *dev) struct mtk8250_data *data = dev_get_drvdata(dev); clk_prepare_enable(data->bus_clk); + clk_prepare_enable(data->uart_clk); return 0; } @@ -475,13 +477,13 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, int dmacnt; #endif - data->uart_clk = devm_clk_get(&pdev->dev, "baud"); + data->uart_clk = devm_clk_get_enabled(&pdev->dev, "baud"); if (IS_ERR(data->uart_clk)) { /* * For compatibility with older device trees try unnamed * clk when no baud clk can be found. */ - data->uart_clk = devm_clk_get(&pdev->dev, NULL); + data->uart_clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(data->uart_clk)) { dev_warn(&pdev->dev, "Can't get uart clock\n"); return PTR_ERR(data->uart_clk); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 28e4beeabf8f3..4fd789a77a13b 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1401,7 +1401,7 @@ config SERIAL_STM32 config SERIAL_STM32_CONSOLE bool "Support for console on STM32" - depends on SERIAL_STM32=y + depends on SERIAL_STM32 select SERIAL_CORE_CONSOLE select SERIAL_EARLYCON diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 35369a2f77b29..2f8e3ea4fe128 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1641,6 +1641,8 @@ static int max310x_i2c_probe(struct i2c_client *client) port_client = devm_i2c_new_dummy_device(&client->dev, client->adapter, port_addr); + if (IS_ERR(port_client)) + return PTR_ERR(port_client); regcfg_i2c.name = max310x_regmap_name(i); regmaps[i] = devm_regmap_init_i2c(port_client, ®cfg_i2c); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 835bd453c0e88..8ed8ee58e3180 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -585,13 +585,6 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) div /= prescaler; } - /* Enable enhanced features */ - sc16is7xx_efr_lock(port); - sc16is7xx_port_update(port, SC16IS7XX_EFR_REG, - SC16IS7XX_EFR_ENABLE_BIT, - SC16IS7XX_EFR_ENABLE_BIT); - sc16is7xx_efr_unlock(port); - /* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. 
*/ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, SC16IS7XX_MCR_CLKSEL_BIT, diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 420e943bb73a7..5e6197a6af5e2 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -243,7 +243,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size, &hwq->sqe_dma_addr, GFP_KERNEL); - if (!hwq->sqe_dma_addr) { + if (!hwq->sqe_base_addr) { dev_err(hba->dev, "SQE allocation failed\n"); return -ENOMEM; } @@ -252,7 +252,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size, &hwq->cqe_dma_addr, GFP_KERNEL); - if (!hwq->cqe_dma_addr) { + if (!hwq->cqe_base_addr) { dev_err(hba->dev, "CQE allocation failed\n"); return -ENOMEM; } diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index a6551a795f744..0b414d1168dd5 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -98,7 +98,6 @@ static void hv_uio_channel_cb(void *context) struct hv_device *hv_dev = chan->device_obj; struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); - chan->inbound.ring_buffer->interrupt_mask = 1; virt_mb(); uio_event_notify(&pdata->info); @@ -163,8 +162,6 @@ hv_uio_new_channel(struct vmbus_channel *new_sc) return; } - /* Disable interrupts on sub channel */ - new_sc->inbound.ring_buffer->interrupt_mask = 1; set_channel_read_mode(new_sc, HV_CALL_ISR); ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap); if (ret) { @@ -207,9 +204,7 @@ hv_uio_open(struct uio_info *info, struct inode *inode) ret = vmbus_connect_ring(dev->channel, hv_uio_channel_cb, dev->channel); - if (ret == 0) - dev->channel->inbound.ring_buffer->interrupt_mask = 1; - else + if (ret) atomic_dec(&pdata->refcnt); return ret; diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c index 36781ea60f6aa..4a9eff7fdb12f 100644 --- a/drivers/usb/cdns3/cdnsp-pci.c +++ b/drivers/usb/cdns3/cdnsp-pci.c @@ -91,7 +91,7 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL); if (!cdnsp) { ret = -ENOMEM; - goto disable_pci; + goto put_pci; } } @@ -174,9 +174,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, if (!pci_is_enabled(func)) kfree(cdnsp); -disable_pci: - pci_disable_device(pdev); - put_pci: pci_dev_put(func); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index bfd97cad8aa4d..c322d0c1d965a 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -464,6 +464,8 @@ static const struct usb_device_id usb_quirk_list[] = { /* Huawei 4G LTE module */ { USB_DEVICE(0x12d1, 0x15bb), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, + { USB_DEVICE(0x12d1, 0x15c1), .driver_info = + USB_QUIRK_DISCONNECT_SUSPEND }, { USB_DEVICE(0x12d1, 0x15c3), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, @@ -734,7 +736,7 @@ void usb_detect_quirks(struct usb_device *udev) udev->quirks ^= usb_detect_dynamic_quirks(udev); if (udev->quirks) - dev_dbg(&udev->dev, "USB quirks for this device: %x\n", + dev_dbg(&udev->dev, "USB quirks for this device: 0x%x\n", udev->quirks); #ifdef CONFIG_USB_DEFAULT_PERSIST diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 1b4d0056f1d08..82282373c786b 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1750,6 +1750,8 @@ static int configfs_composite_bind(struct usb_gadget *gadget, cdev->use_os_string = true; cdev->b_vendor_code = gi->b_vendor_code; 
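/*
 * qw_sign carries the OS string descriptor signature ("MSFT100");
 * the host echoes b_vendor_code back in the vendor-specific request
 * it uses to fetch the OS feature descriptors.
 */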
memcpy(cdev->qw_sign, gi->qw_sign, OS_STRING_QW_SIGN_LEN); + } else { + cdev->use_os_string = false; } if (gadget_is_otg(gadget) && !otg_desc[0]) { diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index 7061720b9732e..106046e17c4e1 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -11,12 +11,15 @@ /* #define VERBOSE_DEBUG */ +#include #include #include #include #include #include +#include + #include "u_serial.h" @@ -613,6 +616,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) struct usb_string *us; int status; struct usb_ep *ep; + struct usb_request *request __free(free_usb_request) = NULL; /* REVISIT might want instance-specific strings to help * distinguish instances ... @@ -630,7 +634,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) /* allocate instance-specific interface IDs, and patch descriptors */ status = usb_interface_id(c, f); if (status < 0) - goto fail; + return status; acm->ctrl_id = status; acm_iad_descriptor.bFirstInterface = status; @@ -639,43 +643,41 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) status = usb_interface_id(c, f); if (status < 0) - goto fail; + return status; acm->data_id = status; acm_data_interface_desc.bInterfaceNumber = status; acm_union_desc.bSlaveInterface0 = status; acm_call_mgmt_descriptor.bDataInterface = status; - status = -ENODEV; - /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc); if (!ep) - goto fail; + return -ENODEV; acm->port.in = ep; ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc); if (!ep) - goto fail; + return -ENODEV; acm->port.out = ep; ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc); if (!ep) - goto fail; + return -ENODEV; acm->notify = ep; acm_iad_descriptor.bFunctionProtocol = acm->bInterfaceProtocol; acm_control_interface_desc.bInterfaceProtocol = acm->bInterfaceProtocol; /* allocate notification */ - acm->notify_req = gs_alloc_req(ep, - sizeof(struct usb_cdc_notification) + 2, - GFP_KERNEL); - if (!acm->notify_req) - goto fail; + request = gs_alloc_req(ep, + sizeof(struct usb_cdc_notification) + 2, + GFP_KERNEL); + if (!request) + return -ENODEV; - acm->notify_req->complete = acm_cdc_notify_complete; - acm->notify_req->context = acm; + request->complete = acm_cdc_notify_complete; + request->context = acm; /* support all relevant hardware speeds... 
we expect that when * hardware is dual speed, all bulk-capable endpoints work at @@ -692,7 +694,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function, acm_ss_function, acm_ss_function); if (status) - goto fail; + return status; + + acm->notify_req = no_free_ptr(request); dev_dbg(&cdev->gadget->dev, "acm ttyGS%d: IN/%s OUT/%s NOTIFY/%s\n", @@ -700,14 +704,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) acm->port.in->name, acm->port.out->name, acm->notify->name); return 0; - -fail: - if (acm->notify_req) - gs_free_req(acm->notify, acm->notify_req); - - ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status); - - return status; } static void acm_unbind(struct usb_configuration *c, struct usb_function *f) diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c index 549efc84dd832..123c03a085099 100644 --- a/drivers/usb/gadget/function/f_ecm.c +++ b/drivers/usb/gadget/function/f_ecm.c @@ -8,12 +8,15 @@ /* #define VERBOSE_DEBUG */ +#include #include #include #include #include #include +#include + #include "u_ether.h" #include "u_ether_configfs.h" #include "u_ecm.h" @@ -678,6 +681,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) struct usb_ep *ep; struct f_ecm_opts *ecm_opts; + struct usb_request *request __free(free_usb_request) = NULL; if (!can_support_ecm(cdev->gadget)) return -EINVAL; @@ -711,7 +715,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) - goto fail; + return status; ecm->ctrl_id = status; ecm_iad_descriptor.bFirstInterface = status; @@ -720,24 +724,22 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) status = usb_interface_id(c, f); if (status < 0) - goto fail; + return status; ecm->data_id = status; ecm_data_nop_intf.bInterfaceNumber = status; ecm_data_intf.bInterfaceNumber = status; ecm_union_desc.bSlaveInterface0 = status; - status = -ENODEV; - /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc); if (!ep) - goto fail; + return -ENODEV; ecm->port.in_ep = ep; ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc); if (!ep) - goto fail; + return -ENODEV; ecm->port.out_ep = ep; /* NOTE: a status/notification endpoint is *OPTIONAL* but we @@ -746,20 +748,18 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) */ ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc); if (!ep) - goto fail; + return -ENODEV; ecm->notify = ep; - status = -ENOMEM; - /* allocate notification request and buffer */ - ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); - if (!ecm->notify_req) - goto fail; - ecm->notify_req->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL); - if (!ecm->notify_req->buf) - goto fail; - ecm->notify_req->context = ecm; - ecm->notify_req->complete = ecm_notify_complete; + request = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!request) + return -ENOMEM; + request->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL); + if (!request->buf) + return -ENOMEM; + request->context = ecm; + request->complete = ecm_notify_complete; /* support all relevant hardware speeds... 
we expect that when * hardware is dual speed, all bulk-capable endpoints work at @@ -778,7 +778,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function, ecm_ss_function, ecm_ss_function); if (status) - goto fail; + return status; /* NOTE: all that is done without knowing or caring about * the network link ... which is unavailable to this code @@ -788,20 +788,12 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) ecm->port.open = ecm_open; ecm->port.close = ecm_close; + ecm->notify_req = no_free_ptr(request); + DBG(cdev, "CDC Ethernet: IN/%s OUT/%s NOTIFY/%s\n", ecm->port.in_ep->name, ecm->port.out_ep->name, ecm->notify->name); return 0; - -fail: - if (ecm->notify_req) { - kfree(ecm->notify_req->buf); - usb_ep_free_request(ecm->notify, ecm->notify_req); - } - - ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); - - return status; } static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item) diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 8e761249d672c..3afc9a622086c 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -11,6 +11,7 @@ * Copyright (C) 2008 Nokia Corporation */ +#include #include #include #include @@ -19,6 +20,7 @@ #include #include +#include #include "u_ether.h" #include "u_ether_configfs.h" @@ -1435,18 +1437,18 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) struct usb_ep *ep; struct f_ncm_opts *ncm_opts; + struct usb_os_desc_table *os_desc_table __free(kfree) = NULL; + struct usb_request *request __free(free_usb_request) = NULL; + if (!can_support_ecm(cdev->gadget)) return -EINVAL; ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst); if (cdev->use_os_string) { - f->os_desc_table = kzalloc(sizeof(*f->os_desc_table), - GFP_KERNEL); - if (!f->os_desc_table) + os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL); + if (!os_desc_table) return -ENOMEM; - f->os_desc_n = 1; - f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc; } mutex_lock(&ncm_opts->lock); @@ -1458,16 +1460,15 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) mutex_unlock(&ncm_opts->lock); if (status) - goto fail; + return status; ncm_opts->bound = true; us = usb_gstrings_attach(cdev, ncm_strings, ARRAY_SIZE(ncm_string_defs)); - if (IS_ERR(us)) { - status = PTR_ERR(us); - goto fail; - } + if (IS_ERR(us)) + return PTR_ERR(us); + ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id; ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id; ncm_data_intf.iInterface = us[STRING_DATA_IDX].id; @@ -1477,20 +1478,16 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) - goto fail; + return status; ncm->ctrl_id = status; ncm_iad_desc.bFirstInterface = status; ncm_control_intf.bInterfaceNumber = status; ncm_union_desc.bMasterInterface0 = status; - if (cdev->use_os_string) - f->os_desc_table[0].if_id = - ncm_iad_desc.bFirstInterface; - status = usb_interface_id(c, f); if (status < 0) - goto fail; + return status; ncm->data_id = status; ncm_data_nop_intf.bInterfaceNumber = status; @@ -1499,35 +1496,31 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) ecm_desc.wMaxSegmentSize = cpu_to_le16(ncm_opts->max_segment_size); - status = -ENODEV; - /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, 
&fs_ncm_in_desc); if (!ep) - goto fail; + return -ENODEV; ncm->port.in_ep = ep; ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc); if (!ep) - goto fail; + return -ENODEV; ncm->port.out_ep = ep; ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc); if (!ep) - goto fail; + return -ENODEV; ncm->notify = ep; - status = -ENOMEM; - /* allocate notification request and buffer */ - ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); - if (!ncm->notify_req) - goto fail; - ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL); - if (!ncm->notify_req->buf) - goto fail; - ncm->notify_req->context = ncm; - ncm->notify_req->complete = ncm_notify_complete; + request = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!request) + return -ENOMEM; + request->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL); + if (!request->buf) + return -ENOMEM; + request->context = ncm; + request->complete = ncm_notify_complete; /* * support all relevant hardware speeds... we expect that when @@ -1547,7 +1540,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function, ncm_ss_function, ncm_ss_function); if (status) - goto fail; + return status; /* * NOTE: all that is done without knowing or caring about @@ -1561,23 +1554,18 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); ncm->task_timer.function = ncm_tx_timeout; + if (cdev->use_os_string) { + os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc; + os_desc_table[0].if_id = ncm_iad_desc.bFirstInterface; + f->os_desc_table = no_free_ptr(os_desc_table); + f->os_desc_n = 1; + } + ncm->notify_req = no_free_ptr(request); + DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n", ncm->port.in_ep->name, ncm->port.out_ep->name, ncm->notify->name); return 0; - -fail: - kfree(f->os_desc_table); - f->os_desc_n = 0; - - if (ncm->notify_req) { - kfree(ncm->notify_req->buf); - usb_ep_free_request(ncm->notify, ncm->notify_req); - } - - ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); - - return status; } static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item) diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index 7cec19d65fb53..7451e7cb7a852 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -19,6 +19,8 @@ #include +#include + #include "u_ether.h" #include "u_ether_configfs.h" #include "u_rndis.h" @@ -662,6 +664,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) struct usb_ep *ep; struct f_rndis_opts *rndis_opts; + struct usb_os_desc_table *os_desc_table __free(kfree) = NULL; + struct usb_request *request __free(free_usb_request) = NULL; if (!can_support_rndis(c)) return -EINVAL; @@ -669,12 +673,9 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst); if (cdev->use_os_string) { - f->os_desc_table = kzalloc(sizeof(*f->os_desc_table), - GFP_KERNEL); - if (!f->os_desc_table) + os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL); + if (!os_desc_table) return -ENOMEM; - f->os_desc_n = 1; - f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc; } rndis_iad_descriptor.bFunctionClass = rndis_opts->class; @@ -692,16 +693,14 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) gether_set_gadget(rndis_opts->net, cdev->gadget); status = gether_register_netdev(rndis_opts->net); 
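/*
 * A note on the pattern used in the f_acm/f_ecm/f_ncm/f_rndis hunks
 * above and below: the deleted fail: labels are made redundant by the
 * scope-based cleanup helpers from <linux/cleanup.h>. A minimal sketch,
 * assuming a free_usb_request cleanup class as declared in these hunks
 * (setup_step() and dev are illustrative stand-ins, not driver symbols):
 *
 *	struct usb_request *request __free(free_usb_request) = NULL;
 *
 *	request = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	if (!request)
 *		return -ENOMEM;
 *	if (setup_step())
 *		return -EIO;
 *	dev->notify_req = no_free_ptr(request);
 *	return 0;
 *
 * Any early return after the allocation frees the request automatically,
 * while no_free_ptr() hands the pointer out and disarms the cleanup, so
 * ownership transfers exactly once, on the success path.
 */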
if (status) - goto fail; + return status; rndis_opts->bound = true; } us = usb_gstrings_attach(cdev, rndis_strings, ARRAY_SIZE(rndis_string_defs)); - if (IS_ERR(us)) { - status = PTR_ERR(us); - goto fail; - } + if (IS_ERR(us)) + return PTR_ERR(us); rndis_control_intf.iInterface = us[0].id; rndis_data_intf.iInterface = us[1].id; rndis_iad_descriptor.iFunction = us[2].id; @@ -709,36 +708,30 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) - goto fail; + return status; rndis->ctrl_id = status; rndis_iad_descriptor.bFirstInterface = status; rndis_control_intf.bInterfaceNumber = status; rndis_union_desc.bMasterInterface0 = status; - if (cdev->use_os_string) - f->os_desc_table[0].if_id = - rndis_iad_descriptor.bFirstInterface; - status = usb_interface_id(c, f); if (status < 0) - goto fail; + return status; rndis->data_id = status; rndis_data_intf.bInterfaceNumber = status; rndis_union_desc.bSlaveInterface0 = status; - status = -ENODEV; - /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc); if (!ep) - goto fail; + return -ENODEV; rndis->port.in_ep = ep; ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc); if (!ep) - goto fail; + return -ENODEV; rndis->port.out_ep = ep; /* NOTE: a status/notification endpoint is, strictly speaking, @@ -747,21 +740,19 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) */ ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc); if (!ep) - goto fail; + return -ENODEV; rndis->notify = ep; - status = -ENOMEM; - /* allocate notification request and buffer */ - rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); - if (!rndis->notify_req) - goto fail; - rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL); - if (!rndis->notify_req->buf) - goto fail; - rndis->notify_req->length = STATUS_BYTECOUNT; - rndis->notify_req->context = rndis; - rndis->notify_req->complete = rndis_response_complete; + request = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!request) + return -ENOMEM; + request->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL); + if (!request->buf) + return -ENOMEM; + request->length = STATUS_BYTECOUNT; + request->context = rndis; + request->complete = rndis_response_complete; /* support all relevant hardware speeds... we expect that when * hardware is dual speed, all bulk-capable endpoints work at @@ -778,7 +769,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function, eth_ss_function, eth_ss_function); if (status) - goto fail; + return status; rndis->port.open = rndis_open; rndis->port.close = rndis_close; @@ -789,9 +780,18 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) if (rndis->manufacturer && rndis->vendorID && rndis_set_param_vendor(rndis->params, rndis->vendorID, rndis->manufacturer)) { - status = -EINVAL; - goto fail_free_descs; + usb_free_all_descriptors(f); + return -EINVAL; + } + + if (cdev->use_os_string) { + os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc; + os_desc_table[0].if_id = rndis_iad_descriptor.bFirstInterface; + f->os_desc_table = no_free_ptr(os_desc_table); + f->os_desc_n = 1; + } + rndis->notify_req = no_free_ptr(request); /* NOTE: all that is done without knowing or caring about * the network link ... 
which is unavailable to this code @@ -802,21 +802,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) rndis->port.in_ep->name, rndis->port.out_ep->name, rndis->notify->name); return 0; - -fail_free_descs: - usb_free_all_descriptors(f); -fail: - kfree(f->os_desc_table); - f->os_desc_n = 0; - - if (rndis->notify_req) { - kfree(rndis->notify_req->buf); - usb_ep_free_request(rndis->notify, rndis->notify_req); - } - - ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); - - return status; } void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net) diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 112fd18d8c99d..c713a9854a3e5 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -667,8 +667,6 @@ static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr, return ERR_PTR(-EINVAL); if (!usb_raw_io_flags_valid(io->flags)) return ERR_PTR(-EINVAL); - if (io->length > PAGE_SIZE) - return ERR_PTR(-EINVAL); if (get_from_user) data = memdup_user(ptr + sizeof(*io), io->length); else { diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index d709e24c1fd42..e3d63b8fa0f4c 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -194,6 +194,9 @@ struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, req = ep->ops->alloc_request(ep, gfp_flags); + if (req) + req->ep = ep; + trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM); return req; diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index dcf31a592f5d1..4b5f03f683f77 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -1916,7 +1916,7 @@ max3421_probe(struct spi_device *spi) if (hcd) { kfree(max3421_hcd->tx); kfree(max3421_hcd->rx); - if (max3421_hcd->spi_thread) + if (!IS_ERR_OR_NULL(max3421_hcd->spi_thread)) kthread_stop(max3421_hcd->spi_thread); usb_put_hcd(hcd); } diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index 1fcc9348dd439..e36e2c031a8f1 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -458,7 +458,7 @@ static void xhci_dbc_ring_init(struct xhci_ring *ring) trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma); trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK)); } - xhci_initialize_ring_info(ring); + xhci_initialize_ring_info(ring, 1); } static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc) @@ -891,7 +891,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) dev_info(dbc->dev, "DbC configured\n"); portsc = readl(&dbc->regs->portsc); writel(portsc, &dbc->regs->portsc); - return EVT_GSER; + ret = EVT_GSER; + break; } return EVT_DONE; @@ -951,7 +952,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) break; case TRB_TYPE(TRB_TRANSFER): dbc_handle_xfer_event(dbc, evt); - ret = EVT_XFER_DONE; + if (ret != EVT_GSER) + ret = EVT_XFER_DONE; break; default: break; @@ -1387,8 +1389,15 @@ int xhci_dbc_suspend(struct xhci_hcd *xhci) if (!dbc) return 0; - if (dbc->state == DS_CONFIGURED) + switch (dbc->state) { + case DS_ENABLED: + case DS_CONNECTED: + case DS_CONFIGURED: dbc->resume_required = 1; + break; + default: + break; + } xhci_dbc_stop(dbc); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index c9694526b157d..f0ed38da6a0c8 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -27,12 +27,14 @@ * "All components of 
all Command and Transfer TRBs shall be initialized to '0'" */ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, + unsigned int cycle_state, unsigned int max_packet, unsigned int num, gfp_t flags) { struct xhci_segment *seg; dma_addr_t dma; + int i; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev)); @@ -54,6 +56,11 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, return NULL; } } + /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ + if (cycle_state == 0) { + for (i = 0; i < TRBS_PER_SEGMENT; i++) + seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE); + } seg->num = num; seg->dma = dma; seg->next = NULL; @@ -131,14 +138,6 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, chain_links = xhci_link_chain_quirk(xhci, ring->type); - /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ - if (ring->cycle_state == 0) { - xhci_for_each_ring_seg(ring->first_seg, seg) { - for (int i = 0; i < TRBS_PER_SEGMENT; i++) - seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); - } - } - next = ring->enq_seg->next; xhci_link_segments(ring->enq_seg, first, ring->type, chain_links); xhci_link_segments(last, next, ring->type, chain_links); @@ -288,7 +287,8 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) kfree(ring); } -void xhci_initialize_ring_info(struct xhci_ring *ring) +void xhci_initialize_ring_info(struct xhci_ring *ring, + unsigned int cycle_state) { /* The ring is empty, so the enqueue pointer == dequeue pointer */ ring->enqueue = ring->first_seg->trbs; @@ -302,7 +302,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring) * New rings are initialized with cycle state equal to 1; if we are * handling ring expansion, set the cycle state equal to the old ring. */ - ring->cycle_state = 1; + ring->cycle_state = cycle_state; /* * Each segment has a link TRB, and leave an extra TRB for SW @@ -317,6 +317,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_segment **first, struct xhci_segment **last, unsigned int num_segs, + unsigned int cycle_state, enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) @@ -327,7 +328,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, chain_links = xhci_link_chain_quirk(xhci, type); - prev = xhci_segment_alloc(xhci, max_packet, num, flags); + prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags); if (!prev) return -ENOMEM; num++; @@ -336,7 +337,8 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, while (num < num_segs) { struct xhci_segment *next; - next = xhci_segment_alloc(xhci, max_packet, num, flags); + next = xhci_segment_alloc(xhci, cycle_state, max_packet, num, + flags); if (!next) goto free_segments; @@ -361,8 +363,9 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, * Set the end flag and the cycle toggle bit on the last segment. * See section 4.9.1 and figures 15 and 16. 
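 * (Sections and figures refer to the xHCI specification; the cycle
 * toggle bit is the LINK_TOGGLE flag or'ed into the last segment's
 * link TRB in xhci_ring_alloc() below.)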
*/ -struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, - enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) +struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, + unsigned int num_segs, unsigned int cycle_state, + enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) { struct xhci_ring *ring; int ret; @@ -380,7 +383,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, return ring; ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs, - type, max_packet, flags); + cycle_state, type, max_packet, flags); if (ret) goto fail; @@ -390,7 +393,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE); } - xhci_initialize_ring_info(ring); + xhci_initialize_ring_info(ring, cycle_state); trace_xhci_ring_alloc(ring); return ring; @@ -418,8 +421,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_segment *last; int ret; - ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->type, - ring->bounce_buf_len, flags); + ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->cycle_state, + ring->type, ring->bounce_buf_len, flags); if (ret) return -ENOMEM; @@ -629,7 +632,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { stream_info->stream_rings[cur_stream] = - xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags); + xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet, + mem_flags); cur_ring = stream_info->stream_rings[cur_stream]; if (!cur_ring) goto cleanup_rings; @@ -970,7 +974,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, } /* Allocate endpoint 0 ring */ - dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags); + dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags); if (!dev->eps[0].ring) goto fail; @@ -1453,7 +1457,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Set up the endpoint ring */ virt_dev->eps[ep_index].new_ring = - xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags); + xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags); if (!virt_dev->eps[ep_index].new_ring) return -ENOMEM; @@ -2262,7 +2266,7 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags) if (!ir) return NULL; - ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags); + ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags); if (!ir->event_ring) { xhci_warn(xhci, "Failed to allocate interrupter event ring\n"); kfree(ir); @@ -2468,7 +2472,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) goto fail; /* Set up the command ring to have one segments for now. */ - xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags); + xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags); if (!xhci->cmd_ring) goto fail; xhci_dbg_trace(xhci, trace_xhci_dbg_init, diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1002fa51a25aa..f377725a12128 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1199,19 +1199,16 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, * Stopped state, but it will soon change to Running. * * Assume this bug on unexpected Stop Endpoint failures. - * Keep retrying until the EP starts and stops again. 
+ * Keep retrying until the EP starts and stops again, on + * chips where this is known to help. Give up after 100ms. */ + if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) + break; fallthrough; case EP_STATE_RUNNING: /* Race, HW handled stop ep cmd before ep was running */ xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n", GET_EP_CTX_STATE(ep_ctx)); - /* - * Don't retry forever if we guessed wrong or a defective HC never starts - * the EP or says 'Running' but fails the command. We must give back TDs. - */ - if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) - break; command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) { diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 3970ec831b8ca..abbf89e82d01a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -769,7 +769,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE); } - xhci_initialize_ring_info(ring); + xhci_initialize_ring_info(ring, 1); /* * Reset the hardware dequeue pointer. * Yes, this will need to be re-written after resume, but we're paranoid diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index b2aeb444daaf5..b4fa8e7e43763 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1803,12 +1803,14 @@ void xhci_slot_copy(struct xhci_hcd *xhci, int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, struct usb_host_endpoint *ep, gfp_t mem_flags); -struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, +struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, + unsigned int num_segs, unsigned int cycle_state, enum xhci_ring_type type, unsigned int max_packet, gfp_t flags); void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int num_trbs, gfp_t flags); -void xhci_initialize_ring_info(struct xhci_ring *ring); +void xhci_initialize_ring_info(struct xhci_ring *ring, + unsigned int cycle_state); void xhci_free_endpoint_ring(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, unsigned int ep_index); diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 6497c4e81e951..9bf8fc6247bac 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -147,6 +147,7 @@ config USB_APPLEDISPLAY config USB_QCOM_EUD tristate "QCOM Embedded USB Debugger(EUD) Driver" depends on ARCH_QCOM || COMPILE_TEST + select QCOM_SCM select USB_ROLE_SWITCH help This module enables support for Qualcomm Technologies, Inc.
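The qcom_eud change that follows stops ioremapping the secure mode-manager region and instead writes it through an SCM firmware call, which is why the Kconfig entry above now selects QCOM_SCM. A minimal sketch of the call pattern, assuming only the qcom_scm_io_writel() helper used in the hunk (the wrapper name and offset argument are illustrative):

	#include <linux/firmware/qcom/qcom_scm.h>

	/*
	 * The region is kept as a physical address (phys_addr_t) rather
	 * than an __iomem mapping, because only secure firmware may
	 * perform the write on behalf of the kernel.
	 */
	static int eud_secure_write(phys_addr_t mode_mgr, u32 offset, u32 val)
	{
		return qcom_scm_io_writel(mode_mgr + offset, val);
	}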
diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c index 19906301a4eb8..012e3b9d9bcc8 100644 --- a/drivers/usb/misc/qcom_eud.c +++ b/drivers/usb/misc/qcom_eud.c @@ -15,6 +15,7 @@ #include #include #include +#include #define EUD_REG_INT1_EN_MASK 0x0024 #define EUD_REG_INT_STATUS_1 0x0044 @@ -34,7 +35,7 @@ struct eud_chip { struct device *dev; struct usb_role_switch *role_sw; void __iomem *base; - void __iomem *mode_mgr; + phys_addr_t mode_mgr; unsigned int int_status; int irq; bool enabled; @@ -43,18 +44,29 @@ struct eud_chip { static int enable_eud(struct eud_chip *priv) { + int ret; + + ret = qcom_scm_io_writel(priv->mode_mgr + EUD_REG_EUD_EN2, 1); + if (ret) + return ret; + writel(EUD_ENABLE, priv->base + EUD_REG_CSR_EUD_EN); writel(EUD_INT_VBUS | EUD_INT_SAFE_MODE, priv->base + EUD_REG_INT1_EN_MASK); - writel(1, priv->mode_mgr + EUD_REG_EUD_EN2); return usb_role_switch_set_role(priv->role_sw, USB_ROLE_DEVICE); } -static void disable_eud(struct eud_chip *priv) +static int disable_eud(struct eud_chip *priv) { + int ret; + + ret = qcom_scm_io_writel(priv->mode_mgr + EUD_REG_EUD_EN2, 0); + if (ret) + return ret; + writel(0, priv->base + EUD_REG_CSR_EUD_EN); - writel(0, priv->mode_mgr + EUD_REG_EUD_EN2); + return 0; } static ssize_t enable_show(struct device *dev, @@ -82,11 +94,12 @@ static ssize_t enable_store(struct device *dev, chip->enabled = enable; else disable_eud(chip); + } else { - disable_eud(chip); + ret = disable_eud(chip); } - return count; + return ret < 0 ? ret : count; } static DEVICE_ATTR_RW(enable); @@ -178,6 +191,7 @@ static void eud_role_switch_release(void *data) static int eud_probe(struct platform_device *pdev) { struct eud_chip *chip; + struct resource *res; int ret; chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); @@ -200,9 +214,10 @@ static int eud_probe(struct platform_device *pdev) if (IS_ERR(chip->base)) return PTR_ERR(chip->base); - chip->mode_mgr = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(chip->mode_mgr)) - return PTR_ERR(chip->mode_mgr); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) + return -ENODEV; + chip->mode_mgr = res->start; chip->irq = platform_get_irq(pdev, 0); if (chip->irq < 0) diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index da09cff55abce..0e732cd53b629 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c @@ -328,9 +328,8 @@ static int twl6030_set_vbus(struct phy_companion *comparator, bool enabled) static int twl6030_usb_probe(struct platform_device *pdev) { - u32 ret; struct twl6030_usb *twl; - int status, err; + int status, err, ret; struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index fc869b7f803f0..5de856f65f0d5 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EM05CN 0x0312 #define QUECTEL_PRODUCT_EM05G_GR 0x0313 #define QUECTEL_PRODUCT_EM05G_RS 0x0314 +#define QUECTEL_PRODUCT_RG255C 0x0316 #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 #define QUECTEL_PRODUCT_RM520N 0x0801 @@ -617,6 +618,7 @@ static void option_instat_callback(struct urb *urb); #define UNISOC_VENDOR_ID 0x1782 /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */ #define TOZED_PRODUCT_LT70C 0x4055 +#define UNISOC_PRODUCT_UIS7720 0x4064 /* Luat Air72*U series based on UNISOC UIS8910 
uses UNISOC's vendor ID */ #define LUAT_PRODUCT_AIR720U 0x4e00 @@ -1270,6 +1272,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x40) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, @@ -1398,10 +1403,14 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | NCTRL(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */ .driver_info = NCTRL(4) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a3, 0xff), /* Telit FN920C04 (ECM) */ + .driver_info = NCTRL(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */ .driver_info = NCTRL(4) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a8, 0xff), /* Telit FN920C04 (ECM) */ + .driver_info = NCTRL(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */ @@ -2114,6 +2123,12 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ .driver_info = RSVD(7) }, + { USB_DEVICE(0x1e0e, 0x9071), /* Simcom SIM8230 RMNET mode */ + .driver_info = RSVD(3) | RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9078, 0xff), /* Simcom SIM8230 ECM mode */ + .driver_info = RSVD(5) }, + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x907b, 0xff), /* Simcom SIM8230 RNDIS mode */ + .driver_info = RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), @@ -2460,6 +2475,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, UNISOC_PRODUCT_UIS7720, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, { USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */ .driver_info = NCTRL(1) }, diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 92ce01b7d049f..b0e6c58e6a59c 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -7696,9 +7696,9 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct 
tcpc_dev *tcpc) port->partner_desc.identity = &port->partner_ident; - port->role_sw = usb_role_switch_get(port->dev); + port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode); if (!port->role_sw) - port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode); + port->role_sw = usb_role_switch_get(port->dev); if (IS_ERR(port->role_sw)) { err = PTR_ERR(port->role_sw); goto out_destroy_wq; diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 7ee721a877c12..1bdf6f5538f44 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -545,24 +545,23 @@ static irqreturn_t cd321x_interrupt(int irq, void *data) if (!event) goto err_unlock; + tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event); + if (!tps6598x_read_status(tps, &status)) - goto err_clear_ints; + goto err_unlock; if (event & APPLE_CD_REG_INT_POWER_STATUS_UPDATE) if (!tps6598x_read_power_status(tps)) - goto err_clear_ints; + goto err_unlock; if (event & APPLE_CD_REG_INT_DATA_STATUS_UPDATE) if (!tps6598x_read_data_status(tps)) - goto err_clear_ints; + goto err_unlock; /* Handle plug insert or removal */ if (event & APPLE_CD_REG_INT_PLUG_EVENT) tps6598x_handle_plug_event(tps, status); -err_clear_ints: - tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event); - err_unlock: mutex_unlock(&tps->lock); @@ -668,25 +667,24 @@ static irqreturn_t tps6598x_interrupt(int irq, void *data) if (!(event1[0] | event1[1] | event2[0] | event2[1])) goto err_unlock; + tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len); + tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len); + if (!tps6598x_read_status(tps, &status)) - goto err_clear_ints; + goto err_unlock; if ((event1[0] | event2[0]) & TPS_REG_INT_POWER_STATUS_UPDATE) if (!tps6598x_read_power_status(tps)) - goto err_clear_ints; + goto err_unlock; if ((event1[0] | event2[0]) & TPS_REG_INT_DATA_STATUS_UPDATE) if (!tps6598x_read_data_status(tps)) - goto err_clear_ints; + goto err_unlock; /* Handle plug insert or removal */ if ((event1[0] | event2[0]) & TPS_REG_INT_PLUG_EVENT) tps6598x_handle_plug_event(tps, status); -err_clear_ints: - tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len); - tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len); - err_unlock: mutex_unlock(&tps->lock); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 8dac1edc74d4e..a793e30d46b78 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -764,6 +764,17 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag ctrlreq->wValue, vdev->rhport); vdev->udev = usb_get_dev(urb->dev); + /* + * NOTE: A similar operation has been done via + * USB_REQ_GET_DESCRIPTOR handler below, which is + * supposed to always precede USB_REQ_SET_ADDRESS. + * + * It's not entirely clear if operating on a different + * usb_device instance here is a real possibility, + * otherwise this call and vdev->udev assignment above + * should be dropped. + */ + dev_pm_syscore_device(&vdev->udev->dev, true); usb_put_dev(old); spin_lock(&vdev->ud.lock); @@ -784,6 +795,17 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag "Not yet?:Get_Descriptor to device 0 (get max pipe size)\n"); vdev->udev = usb_get_dev(urb->dev); + /* + * Set syscore PM flag for the virtually attached + * devices to ensure they will not enter suspend on + * the client side. 
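+ * (dev_pm_syscore_device() sets the flag that makes the PM core skip
+ * the device entirely during system suspend/resume transitions.)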
+ * + * Note this doesn't have any impact on the physical + * devices attached to the host system on the server + * side, hence there is no need to undo the operation + * on disconnect. + */ + dev_pm_syscore_device(&vdev->udev->dev, true); usb_put_dev(old); goto out; diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c index c51f5e4c3dd6d..481992142f790 100644 --- a/drivers/vfio/pci/pds/dirty.c +++ b/drivers/vfio/pci/pds/dirty.c @@ -82,7 +82,7 @@ static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_region *region, host_ack_bmp = vzalloc(bytes); if (!host_ack_bmp) { - bitmap_free(host_seq_bmp); + vfree(host_seq_bmp); return -ENOMEM; } diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 73e153f9b4495..781731eb95cfe 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -1191,6 +1191,7 @@ static inline int copy_from_iotlb(const struct vringh *vrh, void *dst, struct iov_iter iter; u64 translated; int ret; + size_t size; ret = iotlb_translate(vrh, (u64)(uintptr_t)src, len - total_translated, &translated, @@ -1208,9 +1209,9 @@ static inline int copy_from_iotlb(const struct vringh *vrh, void *dst, translated); } - ret = copy_from_iter(dst, translated, &iter); - if (ret < 0) - return ret; + size = copy_from_iter(dst, translated, &iter); + if (size != translated) + return -EFAULT; src += translated; dst += translated; @@ -1237,6 +1238,7 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst, struct iov_iter iter; u64 translated; int ret; + size_t size; ret = iotlb_translate(vrh, (u64)(uintptr_t)dst, len - total_translated, &translated, @@ -1254,9 +1256,9 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst, translated); } - ret = copy_to_iter(src, translated, &iter); - if (ret < 0) - return ret; + size = copy_to_iter(src, translated, &iter); + if (size != translated) + return -EFAULT; src += translated; dst += translated; diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c index 4d1634c492ec4..594b60424d1c6 100644 --- a/drivers/video/fbdev/core/fb_cmdline.c +++ b/drivers/video/fbdev/core/fb_cmdline.c @@ -40,7 +40,7 @@ int fb_get_options(const char *name, char **option) bool enabled; if (name) - is_of = strncmp(name, "offb", 4); + is_of = !strncmp(name, "offb", 4); enabled = __video_get_options(name, &options, is_of); diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 893fd66b5269c..469f81f880f02 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2492,7 +2492,7 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font, unsigned charcount = font->charcount; int w = font->width; int h = font->height; - int size; + int size, alloc_size; int i, csum; u8 *new_data, *data = font->data; int pitch = PITCH(font->width); @@ -2519,9 +2519,16 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font, if (fbcon_invalid_charcount(info, charcount)) return -EINVAL; - size = CALC_FONTSZ(h, pitch, charcount); + /* Check for integer overflow in font size calculation */ + if (check_mul_overflow(h, pitch, &size) || + check_mul_overflow(size, charcount, &size)) + return -EINVAL; + + /* Check for overflow in allocation size calculation */ + if (check_add_overflow(FONT_EXTRA_WORDS * sizeof(int), size, &alloc_size)) + return -EINVAL; - new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); + new_data = kmalloc(alloc_size, GFP_USER); if (!new_data) return -ENOMEM; diff 
--git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index be95fcddce4c8..85bc40aa8b2c6 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -93,6 +93,7 @@ struct simplefb_par { static void simplefb_clocks_destroy(struct simplefb_par *par); static void simplefb_regulators_destroy(struct simplefb_par *par); +static void simplefb_detach_genpds(void *res); /* * fb_ops.fb_destroy is called by the last put_fb_info() call at the end @@ -105,6 +106,7 @@ static void simplefb_destroy(struct fb_info *info) simplefb_regulators_destroy(info->par); simplefb_clocks_destroy(info->par); + simplefb_detach_genpds(info->par); if (info->screen_base) iounmap(info->screen_base); @@ -454,13 +456,14 @@ static void simplefb_detach_genpds(void *res) if (!IS_ERR_OR_NULL(par->genpds[i])) dev_pm_domain_detach(par->genpds[i], true); } + par->num_genpds = 0; } static int simplefb_attach_genpds(struct simplefb_par *par, struct platform_device *pdev) { struct device *dev = &pdev->dev; - unsigned int i; + unsigned int i, num_genpds; int err; err = of_count_phandle_with_args(dev->of_node, "power-domains", @@ -474,26 +477,35 @@ static int simplefb_attach_genpds(struct simplefb_par *par, return err; } - par->num_genpds = err; + num_genpds = err; /* * Single power-domain devices are handled by the driver core, so * nothing to do here. */ - if (par->num_genpds <= 1) + if (num_genpds <= 1) { + par->num_genpds = num_genpds; return 0; + } - par->genpds = devm_kcalloc(dev, par->num_genpds, sizeof(*par->genpds), + par->genpds = devm_kcalloc(dev, num_genpds, sizeof(*par->genpds), GFP_KERNEL); if (!par->genpds) return -ENOMEM; - par->genpd_links = devm_kcalloc(dev, par->num_genpds, + par->genpd_links = devm_kcalloc(dev, num_genpds, sizeof(*par->genpd_links), GFP_KERNEL); if (!par->genpd_links) return -ENOMEM; + /* + * Set par->num_genpds only after genpds and genpd_links are allocated, + * so that simplefb_detach_genpds() exits early if it runs before the + * arrays are fully initialised.
+ */ + par->num_genpds = num_genpds; + for (i = 0; i < par->num_genpds; i++) { par->genpds[i] = dev_pm_domain_attach_by_id(dev, i); if (IS_ERR(par->genpds[i])) { @@ -515,9 +527,10 @@ static int simplefb_attach_genpds(struct simplefb_par *par, dev_warn(dev, "failed to link power-domain %u\n", i); } - return devm_add_action_or_reset(dev, simplefb_detach_genpds, par); + return 0; } #else +static void simplefb_detach_genpds(void *res) { } static int simplefb_attach_genpds(struct simplefb_par *par, struct platform_device *pdev) { @@ -631,18 +644,20 @@ static int simplefb_probe(struct platform_device *pdev) ret = devm_aperture_acquire_for_platform_device(pdev, par->base, par->size); if (ret) { dev_err(&pdev->dev, "Unable to acquire aperture: %d\n", ret); - goto error_regulators; + goto error_genpds; } ret = register_framebuffer(info); if (ret < 0) { dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret); - goto error_regulators; + goto error_genpds; } dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node); return 0; +error_genpds: + simplefb_detach_genpds(par); error_regulators: simplefb_regulators_destroy(par); error_clocks: diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index 867f9f3113797..a4b497ecfa205 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c @@ -100,6 +100,8 @@ static int mpc8xxx_wdt_start(struct watchdog_device *w) ddata->swtc = tmp >> 16; set_bit(WDOG_HW_RUNNING, &ddata->wdd.status); + mpc8xxx_wdt_keepalive(ddata); + return 0; } diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 81effbd53dc52..ecd3ddda6ccbe 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -1320,14 +1320,17 @@ int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev, } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi); -static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) +static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn, + bool percpu) { struct evtchn_status status; evtchn_port_t port; - int rc = -ENOENT; + bool exists = false; memset(&status, 0, sizeof(status)); for (port = 0; port < xen_evtchn_max_channels(); port++) { + int rc; + status.dom = DOMID_SELF; status.port = port; rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); @@ -1335,12 +1338,16 @@ static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) continue; if (status.status != EVTCHNSTAT_virq) continue; - if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) { + if (status.u.virq != virq) + continue; + if (status.vcpu == xen_vcpu_nr(cpu)) { *evtchn = port; - break; + return 0; + } else if (!percpu) { + exists = true; } } - return rc; + return exists ? -EEXIST : -ENOENT; } /** @@ -1387,8 +1394,11 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) evtchn = bind_virq.port; else { if (ret == -EEXIST) - ret = find_virq(virq, cpu, &evtchn); - BUG_ON(ret < 0); + ret = find_virq(virq, cpu, &evtchn, percpu); + if (ret) { + __unbind_from_irq(info, info->irq); + goto out; + } } ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq); @@ -1793,9 +1803,20 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu) * virq or IPI channel, which don't actually need to be rebound. Ignore * it, but don't do the xenlinux-level rebind in that case. 
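 * When the hypercall does succeed for a VIRQ channel, the per-CPU
 * virq_to_irq bookkeeping has to follow the channel to the target CPU,
 * which the added block below takes care of.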
*/ - if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) + if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) { + int old_cpu = info->cpu; + bind_evtchn_to_cpu(info, tcpu, false); + if (info->type == IRQT_VIRQ) { + int virq = info->u.virq; + int irq = per_cpu(virq_to_irq, old_cpu)[virq]; + + per_cpu(virq_to_irq, old_cpu)[virq] = -1; + per_cpu(virq_to_irq, tcpu)[virq] = irq; + } + } + do_unmask(info, EVT_MASK_REASON_TEMPORARY); return 0; diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index b4b4ebed68daf..8f543c8163478 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -116,7 +116,7 @@ static void do_suspend(void) err = dpm_suspend_start(PMSG_FREEZE); if (err) { pr_err("%s: dpm_suspend_start %d\n", __func__, err); - goto out_thaw; + goto out_resume_end; } printk(KERN_DEBUG "suspending xenstore...\n"); @@ -156,6 +156,7 @@ static void do_suspend(void) else xs_suspend_cancel(); +out_resume_end: dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); out_thaw: diff --git a/fs/afs/server.c b/fs/afs/server.c index 4504e16b458cc..5e40e1332259b 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -394,13 +394,14 @@ struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_tra void afs_put_server(struct afs_net *net, struct afs_server *server, enum afs_server_trace reason) { - unsigned int a, debug_id = server->debug_id; + unsigned int a, debug_id; bool zero; int r; if (!server) return; + debug_id = server->debug_id; a = atomic_read(&server->active); zero = __refcount_dec_and_test(&server->ref, &r); trace_afs_server(debug_id, r - 1, a, reason); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e655fa3bfd9be..3a73d218af464 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2100,10 +2100,10 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info, /* returns with log_tree_root freed on success */ ret = btrfs_recover_log_trees(log_tree_root); + btrfs_put_root(log_tree_root); if (ret) { btrfs_handle_fs_error(fs_info, ret, "Failed to recover log tree"); - btrfs_put_root(log_tree_root); return ret; } diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index e2b22bea348a3..c403117ac0136 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -23,7 +23,11 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, int type; if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) { - *max_len = BTRFS_FID_SIZE_CONNECTABLE; + if (btrfs_root_id(BTRFS_I(inode)->root) != + btrfs_root_id(BTRFS_I(parent)->root)) + *max_len = BTRFS_FID_SIZE_CONNECTABLE_ROOT; + else + *max_len = BTRFS_FID_SIZE_CONNECTABLE; return FILEID_INVALID; } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) { *max_len = BTRFS_FID_SIZE_NON_CONNECTABLE; @@ -45,6 +49,8 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, parent_root_id = btrfs_root_id(BTRFS_I(parent)->root); if (parent_root_id != fid->root_objectid) { + if (*max_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) + return FILEID_INVALID; fid->parent_root_objectid = parent_root_id; len = BTRFS_FID_SIZE_CONNECTABLE_ROOT; type = FILEID_BTRFS_WITH_PARENT_ROOT; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index bb3602059906d..7bab2512468d5 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4299,7 +4299,8 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, } static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info, - struct find_free_extent_ctl *ffe_ctl) + struct find_free_extent_ctl *ffe_ctl, + struct 
btrfs_space_info *space_info) { if (ffe_ctl->for_treelog) { spin_lock(&fs_info->treelog_bg_lock); @@ -4323,6 +4324,7 @@ static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info, u64 avail = block_group->zone_capacity - block_group->alloc_offset; if (block_group_bits(block_group, ffe_ctl->flags) && + block_group->space_info == space_info && avail >= ffe_ctl->num_bytes) { ffe_ctl->hint_byte = block_group->start; break; @@ -4344,7 +4346,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info, return prepare_allocation_clustered(fs_info, ffe_ctl, space_info, ins); case BTRFS_EXTENT_ALLOC_ZONED: - return prepare_allocation_zoned(fs_info, ffe_ctl); + return prepare_allocation_zoned(fs_info, ffe_ctl, space_info); default: BUG(); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index afebc91882bef..86a76efb21f63 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -355,6 +355,13 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode, /* step one, find a bunch of delalloc bytes starting at start */ delalloc_start = *start; delalloc_end = 0; + + /* + * If @max_bytes is smaller than a block, btrfs_find_delalloc_range() can + * return early without handling any dirty ranges. + */ + ASSERT(max_bytes >= fs_info->sectorsize); + found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end, max_bytes, &cached_state); if (!found || delalloc_end <= *start || delalloc_start > orig_end) { @@ -385,13 +392,14 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode, delalloc_end); ASSERT(!ret || ret == -EAGAIN); if (ret == -EAGAIN) { - /* some of the folios are gone, lets avoid looping by - * shortening the size of the delalloc range we're searching + /* + * Some of the folios are gone, let's avoid looping by + * shortening the size of the delalloc range we're searching. */ free_extent_state(cached_state); cached_state = NULL; if (!loops) { - max_bytes = PAGE_SIZE; + max_bytes = fs_info->sectorsize; loops = 1; goto again; } else { @@ -954,7 +962,7 @@ static void btrfs_readahead_expand(struct readahead_control *ractl, { const u64 ra_pos = readahead_pos(ractl); const u64 ra_end = ra_pos + readahead_length(ractl); - const u64 em_end = em->start + em->ram_bytes; + const u64 em_end = em->start + em->len; /* No expansion for holes and inline extents. */ if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE) @@ -1479,7 +1487,7 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode, struct btrfs_fs_info *fs_info = inode->root->fs_info; unsigned long range_bitmap = 0; bool submitted_io = false; - bool error = false; + int found_error = 0; const u64 folio_start = folio_pos(folio); u64 cur; int bit; @@ -1536,7 +1544,8 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode, */ btrfs_mark_ordered_io_finished(inode, folio, cur, fs_info->sectorsize, false); - error = true; + if (!found_error) + found_error = ret; continue; } submitted_io = true; @@ -1553,11 +1562,11 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode, * If we hit any error, the corresponding sector will still be dirty * thus no need to clear PAGECACHE_TAG_DIRTY.
*/ - if (!submitted_io && !error) { + if (!submitted_io && !found_error) { btrfs_folio_set_writeback(fs_info, folio, start, len); btrfs_folio_clear_writeback(fs_info, folio, start, len); } - return ret; + return found_error; } /* diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 51f286d5d00ab..8e31af036e75e 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -1108,14 +1108,15 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, * If ret is 1 (no key found), it means this is an empty block group, * without any extents allocated from it and there's no block group * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree - * because we are using the block group tree feature, so block group - * items are stored in the block group tree. It also means there are no - * extents allocated for block groups with a start offset beyond this - * block group's end offset (this is the last, highest, block group). + * because we are using the block group tree feature (so block group + * items are stored in the block group tree) or this is a new block + * group created in the current transaction and its block group item + * was not yet inserted in the extent tree (that happens in + * btrfs_create_pending_block_groups() -> insert_block_group_item()). + * It also means there are no extents allocated for block groups with a + * start offset beyond this block group's end offset (this is the last, + * highest, block group). */ - if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE)) - ASSERT(ret == 0); - start = block_group->start; end = block_group->start + block_group->length; while (ret == 0) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 19c0ec9c327c1..e32dd4193aea1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3174,9 +3174,10 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) goto out; } - if (btrfs_is_zoned(fs_info)) - btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, - ordered_extent->disk_num_bytes); + ret = btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, + ordered_extent->disk_num_bytes); + if (ret) + goto out; if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { truncated = true; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 1706f6d9b12e6..03c3b5d0abbe4 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3852,7 +3852,7 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); if (!prealloc) { ret = -ENOMEM; - goto drop_write; + goto out; } } diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index 2928abf7eb827..fc46190d26c8e 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -998,11 +998,18 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info) if (!btrfs_test_opt(fs_info, REF_VERIFY)) return 0; + extent_root = btrfs_extent_root(fs_info, 0); + /* If the extent tree is damaged we cannot ignore it (IGNOREBADROOTS). 
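+ * Under that mount option the extent root may fail to load, so bail
+ * out and disable ref-verify instead of dereferencing an error pointer.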
*/ + if (IS_ERR(extent_root)) { + btrfs_warn(fs_info, "ref-verify: extent tree not available, disabling"); + btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); + return 0; + } + path = btrfs_alloc_path(); if (!path) return -ENOMEM; - extent_root = btrfs_extent_root(fs_info, 0); eb = btrfs_read_lock_root_node(extent_root); level = btrfs_header_level(eb); path->nodes[level] = eb; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 79eb984041dd6..0d5a3846811ad 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3906,6 +3906,7 @@ static noinline_for_stack struct inode *create_reloc_inode( /* * Mark start of chunk relocation that is cancellable. Check if the cancellation * has been requested meanwhile and don't start in that case. + * NOTE: if this returns an error, reloc_chunk_end() must not be called. * * Return: * 0 success @@ -3922,10 +3923,8 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info) if (atomic_read(&fs_info->reloc_cancel_req) > 0) { btrfs_info(fs_info, "chunk relocation canceled on start"); - /* - * On cancel, clear all requests but let the caller mark - * the end after cleanup operations. - */ + /* On cancel, clear all requests. */ + clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags); atomic_set(&fs_info->reloc_cancel_req, 0); return -ECANCELED; } @@ -3934,9 +3933,11 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info) /* * Mark end of chunk relocation that is cancellable and wake any waiters. + * NOTE: call only if a previous call to reloc_chunk_start() succeeded. */ static void reloc_chunk_end(struct btrfs_fs_info *fs_info) { + ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)); /* Requested after start, clear bit first so any waiters can continue */ if (atomic_read(&fs_info->reloc_cancel_req) > 0) btrfs_info(fs_info, "chunk relocation canceled during operation"); @@ -4145,9 +4146,9 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) if (err && rw) btrfs_dec_block_group_ro(rc->block_group); iput(rc->data_inode); + reloc_chunk_end(fs_info); out_put_bg: btrfs_put_block_group(bg); - reloc_chunk_end(fs_info); free_reloc_control(rc); return err; } @@ -4331,8 +4332,8 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info) ret = ret2; out_unset: unset_reloc_control(rc); -out_end: reloc_chunk_end(fs_info); +out_end: free_reloc_control(rc); out: free_reloc_roots(&reloc_roots); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 3fcc7c092c5ec..9a6e0b047d3b6 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1270,8 +1270,7 @@ static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *d * Slice is divided into intervals when the IO is submitted, adjust by * bwlimit and maximum of 64 intervals. */ - div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024))); - div = min_t(u32, 64, div); + div = clamp(bwlimit / (16 * 1024 * 1024), 1, 64); /* Start new epoch, set deadline */ now = ktime_get(); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 69f9d5f5cc3c6..b0d4ad7fbe489 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2029,7 +2029,13 @@ static int btrfs_get_tree_subvol(struct fs_context *fc) fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); if (!fs_info->super_copy || !fs_info->super_for_commit) { - btrfs_free_fs_info(fs_info); + /* + * Don't call btrfs_free_fs_info() to free it as it's still + * only partially initialized.
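+ * btrfs_free_fs_info() would tear down state that btrfs_init_fs_info()
+ * has not set up yet at this point, so free the pieces by hand.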
+ */ + kfree(fs_info->super_copy); + kfree(fs_info->super_for_commit); + kvfree(fs_info); return -ENOMEM; } btrfs_init_fs_info(fs_info); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 1a029392eac52..f4dda72491feb 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1810,7 +1810,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, } /* see comments in should_cow_block() */ set_bit(BTRFS_ROOT_FORCE_COW, &root->state); - smp_wmb(); + smp_mb__after_atomic(); btrfs_set_root_node(new_root_item, tmp); /* record when the snapshot was created in key.offset */ diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 14f96d217e6e1..3bb7a376bd3fc 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -183,6 +183,7 @@ static bool check_prev_ino(struct extent_buffer *leaf, /* Only these key->types needs to be checked */ ASSERT(key->type == BTRFS_XATTR_ITEM_KEY || key->type == BTRFS_INODE_REF_KEY || + key->type == BTRFS_INODE_EXTREF_KEY || key->type == BTRFS_DIR_INDEX_KEY || key->type == BTRFS_DIR_ITEM_KEY || key->type == BTRFS_EXTENT_DATA_KEY); @@ -1770,6 +1771,39 @@ static int check_inode_ref(struct extent_buffer *leaf, return 0; } +static int check_inode_extref(struct extent_buffer *leaf, + struct btrfs_key *key, struct btrfs_key *prev_key, + int slot) +{ + unsigned long ptr = btrfs_item_ptr_offset(leaf, slot); + unsigned long end = ptr + btrfs_item_size(leaf, slot); + + if (unlikely(!check_prev_ino(leaf, key, slot, prev_key))) + return -EUCLEAN; + + while (ptr < end) { + struct btrfs_inode_extref *extref = (struct btrfs_inode_extref *)ptr; + u16 namelen; + + if (unlikely(ptr + sizeof(*extref) > end)) { + inode_ref_err(leaf, slot, + "inode extref overflow, ptr %lu end %lu inode_extref size %zu", + ptr, end, sizeof(*extref)); + return -EUCLEAN; + } + + namelen = btrfs_inode_extref_name_len(leaf, extref); + if (unlikely(ptr + sizeof(*extref) + namelen > end)) { + inode_ref_err(leaf, slot, + "inode extref overflow, ptr %lu end %lu namelen %u", + ptr, end, namelen); + return -EUCLEAN; + } + ptr += sizeof(*extref) + namelen; + } + return 0; +} + static int check_raid_stripe_extent(const struct extent_buffer *leaf, const struct btrfs_key *key, int slot) { @@ -1881,6 +1915,9 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf, case BTRFS_INODE_REF_KEY: ret = check_inode_ref(leaf, key, prev_key, slot); break; + case BTRFS_INODE_EXTREF_KEY: + ret = check_inode_extref(leaf, key, prev_key, slot); + break; case BTRFS_BLOCK_GROUP_ITEM_KEY: ret = check_block_group_item(leaf, key, slot); break; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 0022ad003791f..173e13e1d5b88 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -350,6 +350,7 @@ static int process_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, struct walk_control *wc, u64 gen, int level) { + struct btrfs_trans_handle *trans = wc->trans; struct btrfs_fs_info *fs_info = log->fs_info; int ret = 0; @@ -364,18 +365,29 @@ static int process_one_buffer(struct btrfs_root *log, }; ret = btrfs_read_extent_buffer(eb, &check); - if (ret) + if (ret) { + if (trans) + btrfs_abort_transaction(trans, ret); + else + btrfs_handle_fs_error(fs_info, ret, NULL); return ret; + } } if (wc->pin) { - ret = btrfs_pin_extent_for_log_replay(wc->trans, eb); - if (ret) + ASSERT(trans != NULL); + ret = btrfs_pin_extent_for_log_replay(trans, eb); + if (ret) { + btrfs_abort_transaction(trans, ret); return ret; + } if 
(btrfs_buffer_uptodate(eb, gen, 0) && - btrfs_header_level(eb) == 0) + btrfs_header_level(eb) == 0) { ret = btrfs_exclude_logged_extents(eb); + if (ret) + btrfs_abort_transaction(trans, ret); + } } return ret; } @@ -1766,6 +1778,8 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, else inc_nlink(vfs_inode); ret = btrfs_update_inode(trans, inode); + if (ret) + btrfs_abort_transaction(trans, ret); } else if (ret == -EEXIST) { ret = 0; } @@ -2431,15 +2445,13 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, int i; int ret; + if (level != 0) + return 0; + ret = btrfs_read_extent_buffer(eb, &check); if (ret) return ret; - level = btrfs_header_level(eb); - - if (level != 0) - return 0; - path = btrfs_alloc_path(); if (!path) return -ENOMEM; @@ -2612,15 +2624,24 @@ static int unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start) static int clean_log_buffer(struct btrfs_trans_handle *trans, struct extent_buffer *eb) { + int ret; + btrfs_tree_lock(eb); btrfs_clear_buffer_dirty(trans, eb); wait_on_extent_buffer_writeback(eb); btrfs_tree_unlock(eb); - if (trans) - return btrfs_pin_reserved_extent(trans, eb); + if (trans) { + ret = btrfs_pin_reserved_extent(trans, eb); + if (ret) + btrfs_abort_transaction(trans, ret); + return ret; + } - return unaccount_log_buffer(eb->fs_info, eb->start); + ret = unaccount_log_buffer(eb->fs_info, eb->start); + if (ret) + btrfs_handle_fs_error(eb->fs_info, ret, NULL); + return ret; } static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, @@ -2656,8 +2677,14 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, next = btrfs_find_create_tree_block(fs_info, bytenr, btrfs_header_owner(cur), *level - 1); - if (IS_ERR(next)) - return PTR_ERR(next); + if (IS_ERR(next)) { + ret = PTR_ERR(next); + if (trans) + btrfs_abort_transaction(trans, ret); + else + btrfs_handle_fs_error(fs_info, ret, NULL); + return ret; + } if (*level == 1) { ret = wc->process_func(root, next, wc, ptr_gen, @@ -2672,6 +2699,10 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, ret = btrfs_read_extent_buffer(next, &check); if (ret) { free_extent_buffer(next); + if (trans) + btrfs_abort_transaction(trans, ret); + else + btrfs_handle_fs_error(fs_info, ret, NULL); return ret; } @@ -2687,6 +2718,10 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, ret = btrfs_read_extent_buffer(next, &check); if (ret) { free_extent_buffer(next); + if (trans) + btrfs_abort_transaction(trans, ret); + else + btrfs_handle_fs_error(fs_info, ret, NULL); return ret; } @@ -7422,7 +7457,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) log_root_tree->log_root = NULL; clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); - btrfs_put_root(log_root_tree); return 0; error: diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 58e0cac5779dd..ce991a8390466 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2699,6 +2699,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path goto error; } + if (bdev_nr_bytes(file_bdev(bdev_file)) <= BTRFS_DEVICE_RANGE_RESERVED) { + ret = -EINVAL; + goto error; + } + if (fs_devices->seeding) { seeding_dev = true; down_write(&sb->s_umount); diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 8e8edfe0c6190..64e0a5bf5f9a5 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -1664,7 +1664,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new) 
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 8e8edfe0c6190..64e0a5bf5f9a5 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1664,7 +1664,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 	    !fs_info->stripe_root) {
 		btrfs_err(fs_info,
 			  "zoned: data %s needs raid-stripe-tree",
 			  btrfs_bg_type_to_raid_name(map->type));
-		return -EINVAL;
+		ret = -EINVAL;
 	}
 
 	if (cache->alloc_offset > cache->zone_capacity) {
@@ -2384,16 +2384,17 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
 	return ret;
 }
 
-void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
+int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
 {
 	struct btrfs_block_group *block_group;
 	u64 min_alloc_bytes;
 
 	if (!btrfs_is_zoned(fs_info))
-		return;
+		return 0;
 
 	block_group = btrfs_lookup_block_group(fs_info, logical);
-	ASSERT(block_group);
+	if (WARN_ON_ONCE(!block_group))
+		return -ENOENT;
 
 	/* No MIXED_BG on zoned btrfs. */
 	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
@@ -2410,6 +2411,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
 
 out:
 	btrfs_put_block_group(block_group);
+	return 0;
 }
 
 static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 7612e65726053..f7171ab6ed71e 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -83,7 +83,7 @@ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
 bool btrfs_zone_activate(struct btrfs_block_group *block_group);
 int btrfs_zone_finish(struct btrfs_block_group *block_group);
 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags);
-void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
+int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
 			     u64 length);
 void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
 				   struct extent_buffer *eb);
@@ -232,8 +232,11 @@ static inline bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices,
 	return true;
 }
 
-static inline void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
-					   u64 logical, u64 length) { }
+static inline int btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
+					  u64 logical, u64 length)
+{
+	return 0;
+}
 
 static inline void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
 						 struct extent_buffer *eb) { }
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b84d1747a0205..e7d192f7ab3b4 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -117,9 +117,18 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
 		inode_nohighmem(inode);
 		inode->i_data.a_ops = &cramfs_aops;
 		break;
-	default:
+	case S_IFCHR:
+	case S_IFBLK:
+	case S_IFIFO:
+	case S_IFSOCK:
 		init_special_inode(inode, cramfs_inode->mode,
 				   old_decode_dev(cramfs_inode->size));
+		break;
+	default:
+		printk(KERN_DEBUG "CRAMFS: Invalid file type 0%04o for inode %lu.\n",
+		       inode->i_mode, inode->i_ino);
+		iget_failed(inode);
+		return ERR_PTR(-EIO);
 	}
 
 	inode->i_mode = cramfs_inode->mode;
diff --git a/fs/dax.c b/fs/dax.c
index 21b47402b3dca..756400f2a6257 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1578,7 +1578,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 	if (iov_iter_rw(iter) == WRITE) {
 		lockdep_assert_held_write(&iomi.inode->i_rwsem);
 		iomi.flags |= IOMAP_WRITE;
-	} else {
+	} else if (!sb_rdonly(iomi.inode->i_sb)) {
 		lockdep_assert_held(&iomi.inode->i_rwsem);
 	}
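Note: the cramfs hunk above replaces a catch-all default with an explicit list of special-file types, so a corrupted mode byte can no longer reach init_special_inode(). A userspace sketch of the same classification, assuming only the seven standard S_IF* classes are valid:

	#include <stdbool.h>
	#include <sys/stat.h>

	static bool cramfs_mode_ok(mode_t mode)
	{
		switch (mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:		/* handled before the switch in cramfs */
		case S_IFCHR:
		case S_IFBLK:
		case S_IFIFO:
		case S_IFSOCK:		/* the only classes that may be "special" */
			return true;
		default:		/* anything else is on-disk corruption */
			return false;
		}
	}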
diff --git a/fs/dcache.c b/fs/dcache.c
index 0f6b16ba30d08..d7814142ba7db 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2475,13 +2475,21 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
 	unsigned int hash = name->hash;
 	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
 	struct hlist_bl_node *node;
-	struct dentry *new = d_alloc(parent, name);
+	struct dentry *new = __d_alloc(parent->d_sb, name);
 	struct dentry *dentry;
 	unsigned seq, r_seq, d_seq;
 
 	if (unlikely(!new))
 		return ERR_PTR(-ENOMEM);
 
+	new->d_flags |= DCACHE_PAR_LOOKUP;
+	spin_lock(&parent->d_lock);
+	new->d_parent = dget_dlock(parent);
+	hlist_add_head(&new->d_sib, &parent->d_children);
+	if (parent->d_flags & DCACHE_DISCONNECTED)
+		new->d_flags |= DCACHE_DISCONNECTED;
+	spin_unlock(&parent->d_lock);
+
 retry:
 	rcu_read_lock();
 	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
@@ -2565,8 +2573,6 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
 		return dentry;
 	}
 	rcu_read_unlock();
-	/* we can't take ->d_lock here; it's OK, though. */
-	new->d_flags |= DCACHE_PAR_LOOKUP;
 	new->d_wait = wq;
 	hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
 	hlist_bl_unlock(b);
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 8afac6e2dff00..7b4b6977dcd66 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -730,7 +730,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 
 	dlm_device_deregister(ls);
 
-	if (force < 3 && dlm_user_daemon_available())
+	if (force != 3 && dlm_user_daemon_available())
 		do_uevent(ls, 0);
 
 	dlm_recoverd_stop(ls);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 075fee4ba29bc..8711fac20804c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -46,10 +46,10 @@
  *
  * 1) epnested_mutex (mutex)
  * 2) ep->mtx (mutex)
- * 3) ep->lock (rwlock)
+ * 3) ep->lock (spinlock)
  *
  * The acquire order is the one listed above, from 1 to 3.
- * We need a rwlock (ep->lock) because we manipulate objects
+ * We need a spinlock (ep->lock) because we manipulate objects
  * from inside the poll callback, that might be triggered from
  * a wake_up() that in turn might be called from IRQ context.
  * So we can't sleep inside the poll callback and hence we need
@@ -195,7 +195,7 @@ struct eventpoll {
 	struct list_head rdllist;
 
 	/* Lock which protects rdllist and ovflist */
-	rwlock_t lock;
+	spinlock_t lock;
 
 	/* RB tree root used to store monitored fd structs */
 	struct rb_root_cached rbr;
@@ -713,10 +713,10 @@ static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
 	 * in a lockless way.
 	 */
 	lockdep_assert_irqs_enabled();
-	write_lock_irq(&ep->lock);
+	spin_lock_irq(&ep->lock);
 	list_splice_init(&ep->rdllist, txlist);
 	WRITE_ONCE(ep->ovflist, NULL);
-	write_unlock_irq(&ep->lock);
+	spin_unlock_irq(&ep->lock);
 }
 
 static void ep_done_scan(struct eventpoll *ep,
@@ -724,7 +724,7 @@ static void ep_done_scan(struct eventpoll *ep,
 {
 	struct epitem *epi, *nepi;
 
-	write_lock_irq(&ep->lock);
+	spin_lock_irq(&ep->lock);
 	/*
 	 * During the time we spent inside the "sproc" callback, some
 	 * other events might have been queued by the poll callback.
@@ -765,7 +765,7 @@ static void ep_done_scan(struct eventpoll *ep,
 		wake_up(&ep->wq);
 	}
 
-	write_unlock_irq(&ep->lock);
+	spin_unlock_irq(&ep->lock);
 }
 
 static void ep_get(struct eventpoll *ep)
@@ -839,10 +839,10 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
 
 	rb_erase_cached(&epi->rbn, &ep->rbr);
 
-	write_lock_irq(&ep->lock);
+	spin_lock_irq(&ep->lock);
 	if (ep_is_linked(epi))
 		list_del_init(&epi->rdllink);
-	write_unlock_irq(&ep->lock);
+	spin_unlock_irq(&ep->lock);
 
 	wakeup_source_unregister(ep_wakeup_source(epi));
 
 	/*
@@ -1123,7 +1123,7 @@ static int ep_alloc(struct eventpoll **pep)
 		return -ENOMEM;
 
 	mutex_init(&ep->mtx);
-	rwlock_init(&ep->lock);
+	spin_lock_init(&ep->lock);
 	init_waitqueue_head(&ep->wq);
 	init_waitqueue_head(&ep->poll_wait);
 	INIT_LIST_HEAD(&ep->rdllist);
@@ -1210,100 +1210,10 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
 }
 #endif /* CONFIG_KCMP */
 
-/*
- * Adds a new entry to the tail of the list in a lockless way, i.e.
- * multiple CPUs are allowed to call this function concurrently.
- *
- * Beware: it is necessary to prevent any other modifications of the
- * existing list until all changes are completed, in other words
- * concurrent list_add_tail_lockless() calls should be protected
- * with a read lock, where write lock acts as a barrier which
- * makes sure all list_add_tail_lockless() calls are fully
- * completed.
- *
- * Also an element can be locklessly added to the list only in one
- * direction i.e. either to the tail or to the head, otherwise
- * concurrent access will corrupt the list.
- *
- * Return: %false if element has been already added to the list, %true
- * otherwise.
- */
-static inline bool list_add_tail_lockless(struct list_head *new,
-					  struct list_head *head)
-{
-	struct list_head *prev;
-
-	/*
-	 * This is simple 'new->next = head' operation, but cmpxchg()
-	 * is used in order to detect that same element has been just
-	 * added to the list from another CPU: the winner observes
-	 * new->next == new.
-	 */
-	if (!try_cmpxchg(&new->next, &new, head))
-		return false;
-
-	/*
-	 * Initially ->next of a new element must be updated with the head
-	 * (we are inserting to the tail) and only then pointers are atomically
-	 * exchanged. XCHG guarantees memory ordering, thus ->next should be
-	 * updated before pointers are actually swapped and pointers are
-	 * swapped before prev->next is updated.
-	 */
-
-	prev = xchg(&head->prev, new);
-
-	/*
-	 * It is safe to modify prev->next and new->prev, because a new element
-	 * is added only to the tail and new->next is updated before XCHG.
-	 */
-
-	prev->next = new;
-	new->prev = prev;
-
-	return true;
-}
-
-/*
- * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
- * i.e. multiple CPUs are allowed to call this function concurrently.
- *
- * Return: %false if epi element has been already chained, %true otherwise.
- */
-static inline bool chain_epi_lockless(struct epitem *epi)
-{
-	struct eventpoll *ep = epi->ep;
-
-	/* Fast preliminary check */
-	if (epi->next != EP_UNACTIVE_PTR)
-		return false;
-
-	/* Check that the same epi has not been just chained from another CPU */
-	if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
-		return false;
-
-	/* Atomically exchange tail */
-	epi->next = xchg(&ep->ovflist, epi);
-
-	return true;
-}
-
 /*
  * This is the callback that is passed to the wait queue wakeup
  * mechanism. It is called by the stored file descriptors when they
  * have events to report.
- *
- * This callback takes a read lock in order not to contend with concurrent
- * events from another file descriptor, thus all modifications to ->rdllist
- * or ->ovflist are lockless. Read lock is paired with the write lock from
- * ep_start/done_scan(), which stops all list modifications and guarantees
- * that lists state is seen correctly.
- *
- * Another thing worth to mention is that ep_poll_callback() can be called
- * concurrently for the same @epi from different CPUs if poll table was inited
- * with several wait queues entries. Plural wakeup from different CPUs of a
- * single wait queue is serialized by wq.lock, but the case when multiple wait
- * queues are used should be detected accordingly. This is detected using
- * cmpxchg() operation.
  */
 static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
@@ -1314,7 +1224,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 	unsigned long flags;
 	int ewake = 0;
 
-	read_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 
 	ep_set_busy_poll_napi_id(epi);
 
@@ -1343,12 +1253,15 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 	 * chained in ep->ovflist and requeued later on.
 	 */
 	if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
-		if (chain_epi_lockless(epi))
+		if (epi->next == EP_UNACTIVE_PTR) {
+			epi->next = READ_ONCE(ep->ovflist);
+			WRITE_ONCE(ep->ovflist, epi);
 			ep_pm_stay_awake_rcu(epi);
+		}
 	} else if (!ep_is_linked(epi)) {
 		/* In the usual case, add event to ready list. */
-		if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
-			ep_pm_stay_awake_rcu(epi);
+		list_add_tail(&epi->rdllink, &ep->rdllist);
+		ep_pm_stay_awake_rcu(epi);
 	}
 
 	/*
@@ -1381,7 +1294,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 		pwake++;
 
 out_unlock:
-	read_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -1716,7 +1629,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 	}
 
 	/* We have to drop the new item inside our item list to keep track of it */
-	write_lock_irq(&ep->lock);
+	spin_lock_irq(&ep->lock);
 
 	/* record NAPI ID of new item if present */
 	ep_set_busy_poll_napi_id(epi);
@@ -1733,7 +1646,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 			pwake++;
 	}
 
-	write_unlock_irq(&ep->lock);
+	spin_unlock_irq(&ep->lock);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -1797,7 +1710,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
 	 * list, push it inside.
 	 */
 	if (ep_item_poll(epi, &pt, 1)) {
-		write_lock_irq(&ep->lock);
+		spin_lock_irq(&ep->lock);
 		if (!ep_is_linked(epi)) {
 			list_add_tail(&epi->rdllink, &ep->rdllist);
 			ep_pm_stay_awake(epi);
@@ -1808,7 +1721,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
 			if (waitqueue_active(&ep->poll_wait))
 				pwake++;
 		}
-		write_unlock_irq(&ep->lock);
+		spin_unlock_irq(&ep->lock);
 	}
 
 	/* We have to call this outside the lock */
@@ -2041,7 +1954,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		init_wait(&wait);
 		wait.func = ep_autoremove_wake_function;
 
-		write_lock_irq(&ep->lock);
+		spin_lock_irq(&ep->lock);
 		/*
 		 * Barrierless variant, waitqueue_active() is called under
 		 * the same lock on wakeup ep_poll_callback() side, so it
@@ -2060,7 +1973,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		if (!eavail)
 			__add_wait_queue_exclusive(&ep->wq, &wait);
 
-		write_unlock_irq(&ep->lock);
+		spin_unlock_irq(&ep->lock);
 
 		if (!eavail)
 			timed_out = !schedule_hrtimeout_range(to, slack,
@@ -2075,7 +1988,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		eavail = 1;
 
 		if (!list_empty_careful(&wait.entry)) {
-			write_lock_irq(&ep->lock);
+			spin_lock_irq(&ep->lock);
 			/*
 			 * If the thread timed out and is not on the wait queue,
 			 * it means that the thread was woken up after its
@@ -2086,7 +1999,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 			if (timed_out)
 				eavail = list_empty(&wait.entry);
 			__remove_wait_queue(&ep->wq, &wait);
-			write_unlock_irq(&ep->lock);
+			spin_unlock_irq(&ep->lock);
 		}
 	}
 }
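Note: with ep->lock demoted from rwlock to plain spinlock, every writer to the ready/overflow lists is serialized, so the retired cmpxchg()/xchg() protocol collapses into an ordinary head push. A standalone sketch of the simplified chaining, with a pthread spinlock standing in for the kernel primitive (items are assumed initialized with next pointing at the sentinel):

	#include <pthread.h>
	#include <stddef.h>

	struct item {
		struct item *next;
	};

	static struct item unactive;		/* EP_UNACTIVE_PTR stand-in */
	static pthread_spinlock_t lock;		/* pthread_spin_init() at setup */
	static struct item *ovflist;

	static void chain_item(struct item *it)
	{
		pthread_spin_lock(&lock);
		if (it->next == &unactive) {	/* not yet chained */
			it->next = ovflist;	/* plain stores suffice: the   */
			ovflist = it;		/* lock serializes all writers */
		}
		pthread_spin_unlock(&lock);
	}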
diff --git a/fs/exec.c b/fs/exec.c
index d607943729638..030240d99ab7c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -717,7 +717,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
 		    unsigned long stack_top,
 		    int executable_stack)
 {
-	unsigned long ret;
+	int ret;
 	unsigned long stack_shift;
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma = bprm->vma;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index d8120b88fa00e..d8a059ec1ad62 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1969,6 +1969,16 @@ static inline bool ext4_verity_in_progress(struct inode *inode)
 
 #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
 
+/*
+ * Check whether the inode is tracked as orphan (either in orphan file or
+ * orphan list).
+ */
+static inline bool ext4_inode_orphan_tracked(struct inode *inode)
+{
+	return ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
+	       !list_empty(&EXT4_I(inode)->i_orphan);
+}
+
 /*
  * Codes for operating systems
  */
@@ -3097,6 +3107,8 @@ extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
 					 sector_t block, blk_opf_t op_flags);
 extern struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
 						   sector_t block);
+extern struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
+						sector_t block);
 extern void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
 				bh_end_io_t *end_io, bool simu_fail);
 extern int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index da4a824563836..9e7275dd901f4 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -276,9 +276,16 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
 		  bh, is_metadata, inode->i_mode,
 		  test_opt(inode->i_sb, DATA_FLAGS));
 
-	/* In the no journal case, we can just do a bforget and return */
+	/*
+	 * In the no journal case, we should wait for any ongoing buffer
+	 * I/O to complete and then do a forget.
+	 */
 	if (!ext4_handle_valid(handle)) {
-		bforget(bh);
+		if (bh) {
+			clear_buffer_dirty(bh);
+			wait_on_buffer(bh);
+			__bforget(bh);
+		}
 		return 0;
 	}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6c692151b0d6c..7d949ed0ab5fa 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -354,7 +354,7 @@ static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc)
 	 * to cleanup the orphan list in ext4_handle_inode_extension(). Do it
 	 * now.
 	 */
-	if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+	if (ext4_inode_orphan_tracked(inode) && inode->i_nlink) {
 		handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
 
 		if (IS_ERR(handle)) {
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
index 91185c40f755a..22fc333244ef7 100644
--- a/fs/ext4/fsmap.c
+++ b/fs/ext4/fsmap.c
@@ -74,7 +74,8 @@ static int ext4_getfsmap_dev_compare(const void *p1, const void *p2)
 static bool ext4_getfsmap_rec_before_low_key(struct ext4_getfsmap_info *info,
 					     struct ext4_fsmap *rec)
 {
-	return rec->fmr_physical < info->gfi_low.fmr_physical;
+	return rec->fmr_physical + rec->fmr_length <=
+	       info->gfi_low.fmr_physical;
 }
 
 /*
@@ -200,15 +201,18 @@ static int ext4_getfsmap_meta_helper(struct super_block *sb,
 				     ext4_group_first_block_no(sb, agno));
 	fs_end = fs_start + EXT4_C2B(sbi, len);
 
-	/* Return relevant extents from the meta_list */
+	/*
+	 * Return relevant extents from the meta_list. We emit all extents that
+	 * partially/fully overlap with the query range
+	 */
 	list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) {
-		if (p->fmr_physical < info->gfi_next_fsblk) {
+		if (p->fmr_physical + p->fmr_length <= info->gfi_next_fsblk) {
 			list_del(&p->fmr_list);
 			kfree(p);
 			continue;
 		}
-		if (p->fmr_physical <= fs_start ||
-		    p->fmr_physical + p->fmr_length <= fs_end) {
+		if (p->fmr_physical <= fs_end &&
+		    p->fmr_physical + p->fmr_length > fs_start) {
 			/* Emit the retained free extent record if present */
 			if (info->gfi_lastfree.fmr_owner) {
 				error = ext4_getfsmap_helper(sb, info,
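Note: the fsmap fixes above all reduce to one interval question — does an extent [start, start + len) genuinely overlap the query window, with merely-touching endpoints excluded? For example, [0, 8) and [8, 16) share only a boundary and must not match; the old code compared start blocks alone, so it both missed records straddling the window and kept records merely adjacent to it. The predicate, stated standalone:

	#include <stdbool.h>
	#include <stdint.h>

	/* Half-open extents: a covers [a_start, a_start + a_len). */
	static bool extents_overlap(uint64_t a_start, uint64_t a_len,
				    uint64_t b_start, uint64_t b_len)
	{
		return a_start < b_start + b_len && b_start < a_start + a_len;
	}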
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index d45124318200d..da76353b3a575 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1025,7 +1025,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 			}
 
 			/* Go read the buffer for the next level down */
-			bh = ext4_sb_bread(inode->i_sb, nr, 0);
+			bh = ext4_sb_bread_nofail(inode->i_sb, nr);
 
 			/*
 			 * A read failure? Report error and clear slot
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7923602271ad0..4ad34eba00a77 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3859,7 +3859,11 @@ int ext4_can_truncate(struct inode *inode)
 * We have to make sure i_disksize gets properly updated before we truncate
 * page cache due to hole punching or zero range. Otherwise i_disksize update
 * can get lost as it may have been postponed to submission of writeback but
- * that will never happen after we truncate page cache.
+ * that will never happen if we remove the folio containing i_size from the
+ * page cache. Also if we punch hole within i_size but above i_disksize,
+ * following ext4_page_mkwrite() may mistakenly allocate written blocks over
+ * the hole and thus introduce allocated blocks beyond i_disksize which is
+ * not allowed (e2fsck would complain in case of crash).
 */
 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
 				      loff_t len)
@@ -3870,9 +3874,11 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, loff_t len)
 	loff_t size = i_size_read(inode);
 
 	WARN_ON(!inode_is_locked(inode));
-	if (offset > size || offset + len < size)
+	if (offset > size)
 		return 0;
 
+	if (offset + len < size)
+		size = offset + len;
 	if (EXT4_I(inode)->i_disksize >= size)
 		return 0;
@@ -4330,7 +4336,7 @@ static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode
 	 * old inodes get re-used with the upper 16 bits of the
 	 * uid/gid intact.
 	 */
-	if (ei->i_dtime && list_empty(&ei->i_orphan)) {
+	if (ei->i_dtime && !ext4_inode_orphan_tracked(inode)) {
 		raw_inode->i_uid_high = 0;
 		raw_inode->i_gid_high = 0;
 	} else {
@@ -4898,6 +4904,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
 	}
 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
 	ext4_set_inode_flags(inode, true);
+	/* Detect invalid flag combination - can't have both inline data and extents */
+	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
+	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+		ext4_error_inode(inode, function, line, 0,
+			"inode has both inline data and extents flags");
+		ret = -EFSCORRUPTED;
+		goto bad_inode;
+	}
 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
 	if (ext4_has_feature_64bit(sb))
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 898443e98efc9..a4c94eabc78ec 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -225,7 +225,7 @@ static int mext_page_mkuptodate(struct folio *folio, size_t from, size_t to)
 	do {
 		if (bh_offset(bh) + blocksize <= from)
 			continue;
-		if (bh_offset(bh) > to)
+		if (bh_offset(bh) >= to)
 			break;
 		wait_on_buffer(bh);
 		if (buffer_uptodate(bh))
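Note: a worked example for the ext4_update_disksize_before_punch() change above. With i_size = 100 MiB, i_disksize = 10 MiB, and a punch of [20 MiB, 30 MiB), the on-disk size only has to reach the end of the punched range (30 MiB), not the full i_size. A hypothetical standalone form of the arithmetic, not the kernel function itself:

	#include <stdint.h>

	/* Returns the value the on-disk size must reach before punching
	 * [offset, offset + len) from a file of size isize; 0 if no
	 * update is needed. */
	static uint64_t disksize_needed(uint64_t isize, uint64_t disksize,
					uint64_t offset, uint64_t len)
	{
		uint64_t size = isize;

		if (offset > size)		/* punch lies entirely past EOF */
			return 0;
		if (offset + len < size)	/* punch ends inside the file... */
			size = offset + len;	/* ...so clamp to the punch end */
		return disksize >= size ? 0 : size;
	}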
diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
index a23b0c01f8096..05997b4d01203 100644
--- a/fs/ext4/orphan.c
+++ b/fs/ext4/orphan.c
@@ -109,11 +109,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
 	WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
 		     !inode_is_locked(inode));
-	/*
-	 * Inode orphaned in orphan file or in orphan list?
-	 */
-	if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
-	    !list_empty(&EXT4_I(inode)->i_orphan))
+	if (ext4_inode_orphan_tracked(inode))
 		return 0;
 
 	/*
@@ -517,7 +513,7 @@ void ext4_release_orphan_info(struct super_block *sb)
 		return;
 	for (i = 0; i < oi->of_blocks; i++)
 		brelse(oi->of_binfo[i].ob_bh);
-	kfree(oi->of_binfo);
+	kvfree(oi->of_binfo);
 }
 
 static struct ext4_orphan_block_tail *ext4_orphan_block_tail(
@@ -588,9 +584,20 @@ int ext4_init_orphan_info(struct super_block *sb)
 		ext4_msg(sb, KERN_ERR, "get orphan inode failed");
 		return PTR_ERR(inode);
 	}
+	/*
+	 * This is just an artificial limit to prevent corrupted fs from
+	 * consuming absurd amounts of memory when pinning blocks of orphan
+	 * file in memory.
+	 */
+	if (inode->i_size > 8 << 20) {
+		ext4_msg(sb, KERN_ERR, "orphan file too big: %llu",
+			 (unsigned long long)inode->i_size);
+		ret = -EFSCORRUPTED;
+		goto out_put;
+	}
 	oi->of_blocks = inode->i_size >> sb->s_blocksize_bits;
 	oi->of_csum_seed = EXT4_I(inode)->i_csum_seed;
-	oi->of_binfo = kmalloc_array(oi->of_blocks,
+	oi->of_binfo = kvmalloc_array(oi->of_blocks,
 				     sizeof(struct ext4_orphan_block),
 				     GFP_KERNEL);
 	if (!oi->of_binfo) {
@@ -631,7 +638,7 @@ int ext4_init_orphan_info(struct super_block *sb)
 out_free:
 	for (i--; i >= 0; i--)
 		brelse(oi->of_binfo[i].ob_bh);
-	kfree(oi->of_binfo);
+	kvfree(oi->of_binfo);
 out_put:
 	iput(inode);
 	return ret;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 58d125ad23714..78ecdedadb076 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -266,6 +266,15 @@ struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
 	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
 }
 
+struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
+					 sector_t block)
+{
+	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
+			~__GFP_FS) | __GFP_MOVABLE | __GFP_NOFAIL;
+
+	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
+}
+
 void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
 {
 	struct buffer_head *bh = bdev_getblk(sb->s_bdev, block,
@@ -1461,9 +1470,9 @@ static void ext4_free_in_core_inode(struct inode *inode)
 
 static void ext4_destroy_inode(struct inode *inode)
 {
-	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
+	if (ext4_inode_orphan_tracked(inode)) {
 		ext4_msg(inode->i_sb, KERN_ERR,
-			 "Inode %lu (%p): orphan list check failed!",
+			 "Inode %lu (%p): inode tracked as orphan!",
 			 inode->i_ino, EXT4_I(inode));
 		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
 			       EXT4_I(inode), sizeof(struct ext4_inode_info),
@@ -2484,7 +2493,7 @@ static int parse_apply_sb_mount_options(struct super_block *sb,
 					struct ext4_fs_context *m_ctx)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	char *s_mount_opts = NULL;
+	char s_mount_opts[65];
 	struct ext4_fs_context *s_ctx = NULL;
 	struct fs_context *fc = NULL;
 	int ret = -ENOMEM;
@@ -2492,15 +2501,11 @@ static int parse_apply_sb_mount_options(struct super_block *sb,
 	if (!sbi->s_es->s_mount_opts[0])
 		return 0;
 
-	s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
-				sizeof(sbi->s_es->s_mount_opts),
-				GFP_KERNEL);
-	if (!s_mount_opts)
-		return ret;
+	strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts);
 
 	fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
 	if (!fc)
-		goto out_free;
+		return -ENOMEM;
 
 	s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
 	if (!s_ctx)
@@ -2532,11 +2537,8 @@ static int parse_apply_sb_mount_options(struct super_block *sb,
 	ret = 0;
 
 out_free:
-	if (fc) {
-		ext4_fc_free(fc);
-		kfree(fc);
-	}
-	kfree(s_mount_opts);
+	ext4_fc_free(fc);
+	kfree(fc);
 	return ret;
 }
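Note: the parse_apply_sb_mount_options() rework above swaps a kstrndup() allocation for a fixed on-stack buffer: s_mount_opts in the ext4 superblock is a 64-byte field with no guaranteed NUL, so a 65-byte destination plus strscpy_pad() always yields a terminated, zero-padded string with no error path. A userspace stand-in for the two-argument kernel helper (assumed semantics):

	#include <string.h>

	/* Copy at most dsize - 1 bytes of an unterminated source field,
	 * always NUL-terminate, and zero the tail so no stale stack
	 * bytes trail the string. */
	static void copy_pad(char *dst, size_t dsize,
			     const char *src, size_t srcmax)
	{
		size_t n = strnlen(src, srcmax);

		if (n >= dsize)
			n = dsize - 1;
		memcpy(dst, src, n);
		memset(dst + n, 0, dsize - n);
	}

	/* Usage mirroring the hunk: char buf[65]; copy_pad(buf, 65, raw, 64); */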
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 6ff94cdf1515c..5ddfa4801bb30 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -251,6 +251,10 @@ check_xattrs(struct inode *inode, struct buffer_head *bh,
 			err_str = "invalid ea_ino";
 			goto errout;
 		}
+		if (ea_ino && !size) {
+			err_str = "invalid size in ea xattr";
+			goto errout;
+		}
 		if (size > EXT4_XATTR_SIZE_MAX) {
 			err_str = "e_value size too large";
 			goto errout;
 		}
@@ -1036,7 +1040,7 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
 				       int ref_change)
 {
 	struct ext4_iloc iloc;
-	s64 ref_count;
+	u64 ref_count;
 	int ret;
 
 	inode_lock_nested(ea_inode, I_MUTEX_XATTR);
@@ -1046,13 +1050,17 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
 		goto out;
 
 	ref_count = ext4_xattr_inode_get_ref(ea_inode);
+	if ((ref_count == 0 && ref_change < 0) || (ref_count == U64_MAX && ref_change > 0)) {
+		ext4_error_inode(ea_inode, __func__, __LINE__, 0,
+			"EA inode %lu ref wraparound: ref_count=%llu ref_change=%d",
+			ea_inode->i_ino, ref_count, ref_change);
+		ret = -EFSCORRUPTED;
+		goto out;
+	}
 	ref_count += ref_change;
 	ext4_xattr_inode_set_ref(ea_inode, ref_count);
 
 	if (ref_change > 0) {
-		WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
-			  ea_inode->i_ino, ref_count);
-
 		if (ref_count == 1) {
 			WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
 				  ea_inode->i_ino, ea_inode->i_nlink);
@@ -1061,9 +1069,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
 			ext4_orphan_del(handle, ea_inode);
 		}
 	} else {
-		WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
-			  ea_inode->i_ino, ref_count);
-
 		if (ref_count == 0) {
 			WARN_ONCE(ea_inode->i_nlink != 1,
 				  "EA inode %lu i_nlink=%u",
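Note on the s64 → u64 switch above: once the refcount is unsigned, "going negative" can only be caught before the arithmetic, since 0 - 1 silently wraps to U64_MAX. The guard, stated minimally:

	#include <stdbool.h>
	#include <stdint.h>

	static bool ref_change_ok(uint64_t ref_count, int ref_change)
	{
		if (ref_change < 0 && ref_count == 0)
			return false;	/* would underflow (wrap to UINT64_MAX) */
		if (ref_change > 0 && ref_count == UINT64_MAX)
			return false;	/* would overflow back to 0 */
		return true;
	}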
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index efc30626760a6..bbb29b6024382 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1508,8 +1508,8 @@ static bool f2fs_map_blocks_cached(struct inode *inode,
 		struct f2fs_dev_info *dev = &sbi->devs[bidx];
 
 		map->m_bdev = dev->bdev;
-		map->m_pblk -= dev->start_blk;
 		map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
+		map->m_pblk -= dev->start_blk;
 	} else {
 		map->m_bdev = inode->i_sb->s_bdev;
 	}
@@ -1781,12 +1781,13 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
 		if (map->m_flags & F2FS_MAP_MAPPED) {
 			unsigned int ofs = start_pgofs - map->m_lblk;
 
-			f2fs_update_read_extent_cache_range(&dn,
-				start_pgofs, map->m_pblk + ofs,
-				map->m_len - ofs);
+			if (map->m_len > ofs)
+				f2fs_update_read_extent_cache_range(&dn,
+					start_pgofs, map->m_pblk + ofs,
+					map->m_len - ofs);
 		}
 		if (map->m_next_extent)
-			*map->m_next_extent = pgofs + 1;
+			*map->m_next_extent = is_hole ? pgofs + 1 : pgofs;
 	}
 	f2fs_put_dnode(&dn);
 unlock_out:
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 2dec22f2ea639..0d3ef487f72ac 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2331,8 +2331,6 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
 {
 	if (!inode)
 		return true;
-	if (!test_opt(sbi, RESERVE_ROOT))
-		return false;
 	if (IS_NOQUOTA(inode))
 		return true;
 	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
@@ -2353,7 +2351,7 @@ static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
 	avail_user_block_count = sbi->user_block_count -
 					sbi->current_reserved_blocks;
 
-	if (!__allow_reserved_blocks(sbi, inode, cap))
+	if (test_opt(sbi, RESERVE_ROOT) && !__allow_reserved_blocks(sbi, inode, cap))
 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
 
 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index fa77841f3e2cc..6317dd523ecd1 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -35,15 +35,23 @@
 #include
 #include
 
-static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size)
+static void f2fs_zero_post_eof_page(struct inode *inode,
+				    loff_t new_size, bool lock)
 {
 	loff_t old_size = i_size_read(inode);
 
 	if (old_size >= new_size)
 		return;
 
+	if (mapping_empty(inode->i_mapping))
+		return;
+
+	if (lock)
+		filemap_invalidate_lock(inode->i_mapping);
 	/* zero or drop pages only in range of [old_size, new_size] */
-	truncate_pagecache(inode, old_size);
+	truncate_inode_pages_range(inode->i_mapping, old_size, new_size);
+	if (lock)
+		filemap_invalidate_unlock(inode->i_mapping);
 }
 
 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
@@ -114,9 +122,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 
 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
 
-	filemap_invalidate_lock(inode->i_mapping);
-	f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT);
-	filemap_invalidate_unlock(inode->i_mapping);
+	f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT, true);
 
 	file_update_time(vmf->vma->vm_file);
 	filemap_invalidate_lock_shared(inode->i_mapping);
@@ -856,8 +862,16 @@ int f2fs_truncate(struct inode *inode)
 	/* we should check inline_data size */
 	if (!f2fs_may_inline_data(inode)) {
 		err = f2fs_convert_inline_inode(inode);
-		if (err)
+		if (err) {
+			/*
+			 * Always truncate page #0 to avoid page cache
+			 * leak in evict() path.
+			 */
+			truncate_inode_pages_range(inode->i_mapping,
+					F2FS_BLK_TO_BYTES(0),
+					F2FS_BLK_END_BYTES(0));
 			return err;
+		}
 	}
 
 	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
@@ -1081,7 +1095,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
 		filemap_invalidate_lock(inode->i_mapping);
 
 		if (attr->ia_size > old_size)
-			f2fs_zero_post_eof_page(inode, attr->ia_size);
+			f2fs_zero_post_eof_page(inode, attr->ia_size, false);
 		truncate_setsize(inode, attr->ia_size);
 
 		if (attr->ia_size <= old_size)
@@ -1200,9 +1214,7 @@ static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	if (ret)
 		return ret;
 
-	filemap_invalidate_lock(inode->i_mapping);
-	f2fs_zero_post_eof_page(inode, offset + len);
-	filemap_invalidate_unlock(inode->i_mapping);
+	f2fs_zero_post_eof_page(inode, offset + len, true);
 
 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1487,7 +1499,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	filemap_invalidate_lock(inode->i_mapping);
 
-	f2fs_zero_post_eof_page(inode, offset + len);
+	f2fs_zero_post_eof_page(inode, offset + len, false);
 
 	f2fs_lock_op(sbi);
 	f2fs_drop_extent_tree(inode);
@@ -1610,9 +1622,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 	if (ret)
 		return ret;
 
-	filemap_invalidate_lock(mapping);
-	f2fs_zero_post_eof_page(inode, offset + len);
-	filemap_invalidate_unlock(mapping);
+	f2fs_zero_post_eof_page(inode, offset + len, true);
 
 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1746,7 +1756,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	filemap_invalidate_lock(mapping);
 
-	f2fs_zero_post_eof_page(inode, offset + len);
+	f2fs_zero_post_eof_page(inode, offset + len, false);
 	truncate_pagecache(inode, offset);
 
 	while (!ret && idx > pg_start) {
@@ -1804,9 +1814,7 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
 	if (err)
 		return err;
 
-	filemap_invalidate_lock(inode->i_mapping);
-	f2fs_zero_post_eof_page(inode, offset + len);
-	filemap_invalidate_unlock(inode->i_mapping);
+	f2fs_zero_post_eof_page(inode, offset + len, true);
 
 	f2fs_balance_fs(sbi, true);
 
@@ -1828,18 +1836,20 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
 		map.m_len = sec_blks;
 next_alloc:
+		f2fs_down_write(&sbi->pin_sem);
+
 		if (has_not_enough_free_secs(sbi, 0, f2fs_sb_has_blkzoned(sbi) ?
			ZONED_PIN_SEC_REQUIRED_COUNT :
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
 			f2fs_down_write(&sbi->gc_lock);
 			stat_inc_gc_call_count(sbi, FOREGROUND);
 			err = f2fs_gc(sbi, &gc_control);
-			if (err && err != -ENODATA)
+			if (err && err != -ENODATA) {
+				f2fs_up_write(&sbi->pin_sem);
 				goto out_err;
+			}
 		}
 
-		f2fs_down_write(&sbi->pin_sem);
-
 		err = f2fs_allocate_pinning_section(sbi);
 		if (err) {
 			f2fs_up_write(&sbi->pin_sem);
@@ -4751,9 +4761,8 @@ static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
 	if (err)
 		return err;
 
-	filemap_invalidate_lock(inode->i_mapping);
-	f2fs_zero_post_eof_page(inode, iocb->ki_pos + iov_iter_count(from));
-	filemap_invalidate_unlock(inode->i_mapping);
+	f2fs_zero_post_eof_page(inode,
+			iocb->ki_pos + iov_iter_count(from), true);
 
 	return count;
 }
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index e48b5e2efea28..8ac6206110a19 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2749,7 +2749,7 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
 							MAIN_SECS(sbi));
 		if (secno >= MAIN_SECS(sbi)) {
 			ret = -ENOSPC;
-			f2fs_bug_on(sbi, 1);
+			f2fs_bug_on(sbi, !pinning);
 			goto out_unlock;
 		}
 	}
@@ -2795,7 +2795,7 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
 out_unlock:
 	spin_unlock(&free_i->segmap_lock);
 
-	if (ret == -ENOSPC)
+	if (ret == -ENOSPC && !pinning)
 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
 	return ret;
 }
@@ -2868,6 +2868,13 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
 	return curseg->segno;
 }
 
+static void reset_curseg_fields(struct curseg_info *curseg)
+{
+	curseg->inited = false;
+	curseg->segno = NULL_SEGNO;
+	curseg->next_segno = 0;
+}
+
 /*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
@@ -2886,7 +2893,7 @@ static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
 	ret = get_new_segment(sbi, &segno, new_sec, pinning);
 	if (ret) {
 		if (ret == -ENOSPC)
-			curseg->segno = NULL_SEGNO;
+			reset_curseg_fields(curseg);
 		return ret;
 	}
@@ -3640,13 +3647,6 @@ static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
 			get_random_u32_inclusive(1, sbi->max_fragment_hole);
 }
 
-static void reset_curseg_fields(struct curseg_info *curseg)
-{
-	curseg->inited = false;
-	curseg->segno = NULL_SEGNO;
-	curseg->next_segno = 0;
-}
-
 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 		block_t old_blkaddr, block_t *new_blkaddr,
 		struct f2fs_summary *sum, int type,
diff --git a/fs/file.c b/fs/file.c
index bfc9eb9e72298..68c1bcc6b7e97 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -1262,7 +1262,10 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
 	err = expand_files(files, fd);
 	if (unlikely(err < 0))
 		goto out_unlock;
-	return do_dup2(files, file, fd, flags);
+	err = do_dup2(files, file, fd, flags);
+	if (err < 0)
+		return err;
+	return 0;
 
 out_unlock:
 	spin_unlock(&files->file_lock);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 4ae226778d646..28edfad85c628 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -446,22 +446,23 @@ static bool inode_do_switch_wbs(struct inode *inode,
 	 * Transfer to @new_wb's IO list if necessary. If the @inode is dirty,
 	 * the specific list @inode was on is ignored and the @inode is put on
 	 * ->b_dirty which is always correct including from ->b_dirty_time.
-	 * The transfer preserves @inode->dirtied_when ordering. If the @inode
-	 * was clean, it means it was on the b_attached list, so move it onto
-	 * the b_attached list of @new_wb.
+	 * If the @inode was clean, it means it was on the b_attached list, so
+	 * move it onto the b_attached list of @new_wb.
 	 */
 	if (!list_empty(&inode->i_io_list)) {
 		inode->i_wb = new_wb;
 
 		if (inode->i_state & I_DIRTY_ALL) {
-			struct inode *pos;
-
-			list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
-				if (time_after_eq(inode->dirtied_when,
-						  pos->dirtied_when))
-					break;
+			/*
+			 * We need to keep b_dirty list sorted by
+			 * dirtied_time_when. However properly sorting the
+			 * inode in the list gets too expensive when switching
+			 * many inodes. So just attach inode at the end of the
+			 * dirty list and clobber the dirtied_time_when.
+			 */
+			inode->dirtied_time_when = jiffies;
 			inode_io_list_move_locked(inode, new_wb,
-						  pos->i_io_list.prev);
+						  &new_wb->b_dirty);
 		} else {
 			inode_cgwb_move_to_attached(inode, new_wb);
 		}
@@ -503,6 +504,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	 */
 	down_read(&bdi->wb_switch_rwsem);
 
+	inodep = isw->inodes;
 	/*
 	 * By the time control reaches here, RCU grace period has passed
 	 * since I_WB_SWITCH assertion and all wb stat update transactions
@@ -513,6 +515,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	 * gives us exclusion against all wb related operations on @inode
 	 * including IO list manipulations and stat updates.
 	 */
+relock:
 	if (old_wb < new_wb) {
 		spin_lock(&old_wb->list_lock);
 		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
@@ -521,10 +524,17 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 	}
 
-	for (inodep = isw->inodes; *inodep; inodep++) {
+	while (*inodep) {
 		WARN_ON_ONCE((*inodep)->i_wb != old_wb);
 		if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
 			nr_switched++;
+		inodep++;
+		if (*inodep && need_resched()) {
+			spin_unlock(&new_wb->list_lock);
+			spin_unlock(&old_wb->list_lock);
+			cond_resched();
+			goto relock;
+		}
 	}
 
 	spin_unlock(&new_wb->list_lock);
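Note: the relock loop added above is the classic lock-break pattern — when many inodes are switched in one batch, both list locks are dropped at a preemption point and re-taken in the fixed address order before resuming from the next inode. A condensed sketch; lock_both()/unlock_both() are placeholders of mine, not kernel API:

	/* Sketch only: lock_both()/unlock_both() stand in for taking and
	 * releasing old_wb->list_lock and new_wb->list_lock in address
	 * order, exactly as the hunk above does. */
	inodep = isw->inodes;
	relock:
	lock_both();
	while (*inodep) {
		switch_one_inode(*inodep);	/* hypothetical per-inode step */
		inodep++;
		if (*inodep && need_resched()) {
			unlock_both();
			cond_resched();		/* let other work run */
			goto relock;
		}
	}
	unlock_both();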
diff --git a/fs/fsopen.c b/fs/fsopen.c
index 6cef3deccdedf..8903d2c23db4e 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -18,50 +18,56 @@
 #include "internal.h"
 #include "mount.h"
 
+static inline const char *fetch_message_locked(struct fc_log *log, size_t len,
+					       bool *need_free)
+{
+	const char *p;
+	int index;
+
+	if (unlikely(log->head == log->tail))
+		return ERR_PTR(-ENODATA);
+
+	index = log->tail & (ARRAY_SIZE(log->buffer) - 1);
+	p = log->buffer[index];
+	if (unlikely(strlen(p) > len))
+		return ERR_PTR(-EMSGSIZE);
+
+	log->buffer[index] = NULL;
+	*need_free = log->need_free & (1 << index);
+	log->need_free &= ~(1 << index);
+	log->tail++;
+
+	return p;
+}
+
 /*
  * Allow the user to read back any error, warning or informational messages.
+ * Only one message is returned for each read(2) call.
  */
 static ssize_t fscontext_read(struct file *file,
 			      char __user *_buf, size_t len, loff_t *pos)
 {
 	struct fs_context *fc = file->private_data;
-	struct fc_log *log = fc->log.log;
-	unsigned int logsize = ARRAY_SIZE(log->buffer);
-	ssize_t ret;
-	char *p;
+	ssize_t err;
+	const char *p __free(kfree) = NULL, *message;
 	bool need_free;
-	int index, n;
+	int n;
 
-	ret = mutex_lock_interruptible(&fc->uapi_mutex);
-	if (ret < 0)
-		return ret;
-
-	if (log->head == log->tail) {
-		mutex_unlock(&fc->uapi_mutex);
-		return -ENODATA;
-	}
-
-	index = log->tail & (logsize - 1);
-	p = log->buffer[index];
-	need_free = log->need_free & (1 << index);
-	log->buffer[index] = NULL;
-	log->need_free &= ~(1 << index);
-	log->tail++;
+	err = mutex_lock_interruptible(&fc->uapi_mutex);
+	if (err < 0)
+		return err;
+	message = fetch_message_locked(fc->log.log, len, &need_free);
 	mutex_unlock(&fc->uapi_mutex);
+	if (IS_ERR(message))
+		return PTR_ERR(message);
 
-	ret = -EMSGSIZE;
-	n = strlen(p);
-	if (n > len)
-		goto err_free;
-	ret = -EFAULT;
-	if (copy_to_user(_buf, p, n) != 0)
-		goto err_free;
-	ret = n;
-
-err_free:
 	if (need_free)
-		kfree(p);
-	return ret;
+		p = message;
+
+	n = strlen(message);
+	if (copy_to_user(_buf, message, n))
+		return -EFAULT;
+	return n;
 }
 
 static int fscontext_release(struct inode *inode, struct file *file)
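Note: with fscontext_read() reworked as above, the userspace contract is easy to state — each read(2) on an fsopen() fd drains exactly one logged message, and -ENODATA signals an empty log. A small consumer (error handling trimmed; the fsconfig step is only indicated):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef FSOPEN_CLOEXEC
	#define FSOPEN_CLOEXEC 0x00000001
	#endif

	int main(void)
	{
		int fd = syscall(SYS_fsopen, "ext4", FSOPEN_CLOEXEC);
		char buf[1024];
		ssize_t n;

		if (fd < 0)
			return 1;
		/* ... fsconfig(2) calls that may queue log messages ... */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			printf("%.*s\n", (int)n, buf);	/* one message per read */
		close(fd);
		return 0;
	}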
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 1f64ae6d7a69e..8207855f9af25 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1989,7 +1989,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
 	 */
 	if (!oh.unique) {
 		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
-		goto out;
+		goto copy_finish;
 	}
 
 	err = -EINVAL;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 49659d1b29321..a8218a3bc0b4a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -355,8 +355,14 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff,
 	 * Make the release synchronous if this is a fuseblk mount,
 	 * synchronous RELEASE is allowed (and desirable) in this case
 	 * because the server can be trusted not to screw up.
+	 *
+	 * Always use the asynchronous file put because the current thread
+	 * might be the fuse server. This can happen if a process starts some
+	 * aio and closes the fd before the aio completes. Since aio takes its
+	 * own ref to the file, the IO completion has to drop the ref, which is
+	 * how the fuse server can end up closing its clients' files.
 	 */
-	fuse_file_put(ff, ff->fm->fc->destroy);
+	fuse_file_put(ff, false);
 }
 
 void fuse_release_common(struct file *file, bool isdir)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 161fc76ed5b0e..e5558e63e2cba 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -801,8 +801,6 @@ __acquires(&gl->gl_lockref.lock)
 		clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 		gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
 		return;
-	} else {
-		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 	}
 }
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 9e27dd8bef88d..38ea69ca2303d 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -321,12 +321,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
 	gfs2_update_request_times(gl);
 
-	/* don't want to call dlm if we've unmounted the lock protocol */
-	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
-		gfs2_glock_free(gl);
-		return;
-	}
-
 	/*
 	 * When the lockspace is released, all remaining glocks will be
 	 * unlocked automatically. This is more efficient than unlocking them
@@ -348,6 +342,11 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 		goto again;
 	}
 
+	if (error == -ENODEV) {
+		gfs2_glock_free(gl);
+		return;
+	}
+
 	if (error) {
 		fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
 		       gl->gl_name.ln_type,
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index 34e9804e0f360..e46f650b5e9c2 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -21,7 +21,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 
 	fd->tree = tree;
 	fd->bnode = NULL;
-	ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
+	ptr = kzalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
 	if (!ptr)
 		return -ENOMEM;
 	fd->search_key = ptr;
@@ -115,6 +115,12 @@ int hfs_brec_find(struct hfs_find_data *fd)
 	__be32 data;
 	int height, res;
 
+	fd->record = -1;
+	fd->keyoffset = -1;
+	fd->keylength = -1;
+	fd->entryoffset = -1;
+	fd->entrylength = -1;
+
 	tree = fd->tree;
 	if (fd->bnode)
 		hfs_bnode_put(fd->bnode);
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 896396554bcc1..b01db1fae147c 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -179,6 +179,7 @@ int hfs_brec_remove(struct hfs_find_data *fd)
 	struct hfs_btree *tree;
 	struct hfs_bnode *node, *parent;
 	int end_off, rec_off, data_off, size;
+	int src, dst, len;
 
 	tree = fd->tree;
 	node = fd->bnode;
@@ -208,10 +209,14 @@ int hfs_brec_remove(struct hfs_find_data *fd)
 	}
 	hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs),
 			    node->num_recs);
-	if (rec_off == end_off)
-		goto skip;
 	size = fd->keylength + fd->entrylength;
 
+	if (rec_off == end_off) {
+		src = fd->keyoffset;
+		hfs_bnode_clear(node, src, size);
+		goto skip;
+	}
+
 	do {
 		data_off = hfs_bnode_read_u16(node, rec_off);
 		hfs_bnode_write_u16(node, rec_off + 2, data_off - size);
 	} while (rec_off >= end_off);
 
 	/* fill hole */
-	hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size,
-		       data_off - fd->keyoffset - size);
+	dst = fd->keyoffset;
+	src = fd->keyoffset + size;
+	len = data_off - src;
+
+	hfs_bnode_move(node, dst, src, len);
+
+	src = dst + len;
+	len = data_off - src;
+
+	hfs_bnode_clear(node, src, len);
+
 skip:
+	/*
+	 * Remove the obsolete offset to free space.
+	 */
+	hfs_bnode_write_u16(node, end_off, 0);
+
 	hfs_bnode_dump(node);
 	if (!fd->record)
 		hfs_brec_update_parent(fd);
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 8082eb01127cd..bf811347bb07d 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -172,7 +172,7 @@ int hfs_mdb_get(struct super_block *sb)
 		pr_warn("continuing without an alternate MDB\n");
 	}
 
-	HFS_SB(sb)->bitmap = kmalloc(8192, GFP_KERNEL);
+	HFS_SB(sb)->bitmap = kzalloc(8192, GFP_KERNEL);
 	if (!HFS_SB(sb)->bitmap)
 		goto out;
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index 901e83d65d202..26ebac4c60424 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -18,7 +18,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 
 	fd->tree = tree;
 	fd->bnode = NULL;
-	ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
+	ptr = kzalloc(tree->max_key_len * 2 + 4, GFP_KERNEL);
 	if (!ptr)
 		return -ENOMEM;
 	fd->search_key = ptr;
@@ -158,6 +158,12 @@ int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare)
 	__be32 data;
 	int height, res;
 
+	fd->record = -1;
+	fd->keyoffset = -1;
+	fd->keylength = -1;
+	fd->entryoffset = -1;
+	fd->entrylength = -1;
+
 	tree = fd->tree;
 	if (fd->bnode)
 		hfs_bnode_put(fd->bnode);
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 14f4995588ff0..407d5152eb411 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -18,47 +18,6 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 
-static inline
-bool is_bnode_offset_valid(struct hfs_bnode *node, int off)
-{
-	bool is_valid = off < node->tree->node_size;
-
-	if (!is_valid) {
-		pr_err("requested invalid offset: "
-		       "NODE: id %u, type %#x, height %u, "
-		       "node_size %u, offset %d\n",
-		       node->this, node->type, node->height,
-		       node->tree->node_size, off);
-	}
-
-	return is_valid;
-}
-
-static inline
-int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len)
-{
-	unsigned int node_size;
-
-	if (!is_bnode_offset_valid(node, off))
-		return 0;
-
-	node_size = node->tree->node_size;
-
-	if ((off + len) > node_size) {
-		int new_len = (int)node_size - off;
-
-		pr_err("requested length has been corrected: "
-		       "NODE: id %u, type %#x, height %u, "
-		       "node_size %u, offset %d, "
-		       "requested_len %d, corrected_len %d\n",
-		       node->this, node->type, node->height,
-		       node->tree->node_size, off, len, new_len);
-
-		return new_len;
-	}
-
-	return len;
-}
 
 /* Copy a specified range of bytes from the raw data of a node */
 void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 9e1732a2b92a8..fe6a54c4083c3 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -393,6 +393,12 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 		len = hfs_brec_lenoff(node, 2, &off16);
 		off = off16;
 
+		if (!is_bnode_offset_valid(node, off)) {
+			hfs_bnode_put(node);
+			return ERR_PTR(-EIO);
+		}
+		len = check_and_correct_requested_length(node, off, len);
+
 		off += node->page_offset;
 		pagep = node->page + (off >> PAGE_SHIFT);
 		data = kmap_local_page(*pagep);
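Note: the hfs_bmap_alloc() hunk above shows the intended call sequence for the helpers that the next hunk hoists into hfsplus_fs.h — reject an offset past the node outright, then clamp the length before anything is mapped. A standalone restatement of the clamp's contract:

	/* Sketch of check_and_correct_requested_length(): an out-of-range
	 * offset yields zero readable bytes; otherwise off + len is
	 * clipped to the node size. */
	static int clamp_request(unsigned int node_size, int off, int len)
	{
		if (off < 0 || (unsigned int)off >= node_size)
			return 0;
		if ((unsigned int)off + (unsigned int)len > node_size)
			return (int)(node_size - off);
		return len;
	}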
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 5389918bbf29d..6c19935d6f505 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -575,6 +575,48 @@ hfsplus_btree_lock_class(struct hfs_btree *tree)
 	return class;
 }
 
+static inline
+bool is_bnode_offset_valid(struct hfs_bnode *node, int off)
+{
+	bool is_valid = off < node->tree->node_size;
+
+	if (!is_valid) {
+		pr_err("requested invalid offset: "
+		       "NODE: id %u, type %#x, height %u, "
+		       "node_size %u, offset %d\n",
+		       node->this, node->type, node->height,
+		       node->tree->node_size, off);
+	}
+
+	return is_valid;
+}
+
+static inline
+int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len)
+{
+	unsigned int node_size;
+
+	if (!is_bnode_offset_valid(node, off))
+		return 0;
+
+	node_size = node->tree->node_size;
+
+	if ((off + len) > node_size) {
+		int new_len = (int)node_size - off;
+
+		pr_err("requested length has been corrected: "
+		       "NODE: id %u, type %#x, height %u, "
+		       "node_size %u, offset %d, "
+		       "requested_len %d, corrected_len %d\n",
+		       node->this, node->type, node->height,
+		       node->tree->node_size, off, len, new_len);
+
+		return new_len;
+	}
+
+	return len;
+}
+
 /* compatibility */
 #define hfsp_mt2ut(t)		(struct timespec64){ .tv_sec = __hfsp_mt2ut(t) }
 #define hfsp_ut2mt(t)		__hfsp_ut2mt((t).tv_sec)
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 97920202790f9..0831cd7aa5deb 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -67,13 +67,26 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
 	if (!(inode->i_state & I_NEW))
 		return inode;
 
-	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
-	spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
-	mutex_init(&HFSPLUS_I(inode)->extents_lock);
-	HFSPLUS_I(inode)->flags = 0;
+	atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+	HFSPLUS_I(inode)->first_blocks = 0;
+	HFSPLUS_I(inode)->clump_blocks = 0;
+	HFSPLUS_I(inode)->alloc_blocks = 0;
+	HFSPLUS_I(inode)->cached_start = U32_MAX;
+	HFSPLUS_I(inode)->cached_blocks = 0;
+	memset(HFSPLUS_I(inode)->first_extents, 0, sizeof(hfsplus_extent_rec));
+	memset(HFSPLUS_I(inode)->cached_extents, 0, sizeof(hfsplus_extent_rec));
 	HFSPLUS_I(inode)->extent_state = 0;
+	mutex_init(&HFSPLUS_I(inode)->extents_lock);
 	HFSPLUS_I(inode)->rsrc_inode = NULL;
-	atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+	HFSPLUS_I(inode)->create_date = 0;
+	HFSPLUS_I(inode)->linkid = 0;
+	HFSPLUS_I(inode)->flags = 0;
+	HFSPLUS_I(inode)->fs_blocks = 0;
+	HFSPLUS_I(inode)->userflags = 0;
+	HFSPLUS_I(inode)->subfolders = 0;
+	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
+	spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
+	HFSPLUS_I(inode)->phys_size = 0;
 
 	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
 	    inode->i_ino == HFSPLUS_ROOT_CNID) {
@@ -531,7 +544,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
 		hfs_find_exit(&fd);
 		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
-			err = -EINVAL;
+			err = -EIO;
 			goto out_put_root;
 		}
 		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index 36b6cf2a3abba..ebd326799f35a 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -40,6 +40,18 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
 	p1 = s1->unicode;
 	p2 = s2->unicode;
 
+	if (len1 > HFSPLUS_MAX_STRLEN) {
+		len1 = HFSPLUS_MAX_STRLEN;
+		pr_err("invalid length %u has been corrected to %d\n",
+		       be16_to_cpu(s1->length), len1);
+	}
+
+	if (len2 > HFSPLUS_MAX_STRLEN) {
+		len2 = HFSPLUS_MAX_STRLEN;
+		pr_err("invalid length %u has been corrected to %d\n",
+		       be16_to_cpu(s2->length), len2);
+	}
+
 	while (1) {
 		c1 = c2 = 0;
 
@@ -74,6 +86,18 @@ int hfsplus_strcmp(const struct hfsplus_unistr *s1,
 	p1 = s1->unicode;
 	p2 = s2->unicode;
 
+	if (len1 > HFSPLUS_MAX_STRLEN) {
+		len1 = HFSPLUS_MAX_STRLEN;
+		pr_err("invalid length %u has been corrected to %d\n",
+		       be16_to_cpu(s1->length), len1);
+	}
+
+	if (len2 > HFSPLUS_MAX_STRLEN) {
+		len2 = HFSPLUS_MAX_STRLEN;
+		pr_err("invalid length %u has been corrected to %d\n",
+		       be16_to_cpu(s2->length), len2);
+	}
+
 	for (len = min(len1, len2); len > 0; len--) {
 		c1 = be16_to_cpu(*p1);
 		c2 = be16_to_cpu(*p2);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index c0856585bb638..4aa9a1428dd58 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -594,14 +594,16 @@ static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
 
 	/*
 	 * If folio is mapped, it was faulted in after being
-	 * unmapped in caller. Unmap (again) while holding
-	 * the fault mutex. The mutex will prevent faults
-	 * until we finish removing the folio.
+	 * unmapped in caller, or hugetlb_vmdelete_list() skipped
+	 * unmapping it after failing to grab the lock. Unmap (again)
+	 * while holding the fault mutex. The mutex will prevent
+	 * faults until we finish removing the folio. Hold the folio
+	 * lock to guarantee no concurrent migration.
 	 */
+	folio_lock(folio);
 	if (unlikely(folio_mapped(folio)))
 		hugetlb_unmap_file_folio(h, mapping, folio, index);
 
-	folio_lock(folio);
 	/*
 	 * We must remove the folio from page cache before removing
 	 * the region/ reserve map (hugetlb_unreserve_pages). In
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index f440110df93a9..ae43920ce395c 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1663,6 +1663,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
 	int drop_reserve = 0;
 	int err = 0;
 	int was_modified = 0;
+	int wait_for_writeback = 0;
 
 	if (is_handle_aborted(handle))
 		return -EROFS;
@@ -1786,18 +1787,22 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
 		}
 
 		/*
-		 * The buffer is still not written to disk, we should
-		 * attach this buffer to current transaction so that the
-		 * buffer can be checkpointed only after the current
-		 * transaction commits.
+		 * The buffer has not yet been written to disk. We should
+		 * either clear the buffer or ensure that the ongoing I/O
+		 * is completed, and attach this buffer to current
+		 * transaction so that the buffer can be checkpointed only
+		 * after the current transaction commits.
		 */
 		clear_buffer_dirty(bh);
+		wait_for_writeback = 1;
 		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
 		spin_unlock(&journal->j_list_lock);
 	}
drop:
 	__brelse(bh);
 	spin_unlock(&jh->b_state_lock);
+	if (wait_for_writeback)
+		wait_on_buffer(bh);
 	jbd2_journal_put_journal_head(jh);
 	if (drop_reserve) {
 		/* no need to reserve log space for this block -bzzz */
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index f007e389d5d29..fc01f9dc8c391 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -491,8 +491,14 @@ void minix_set_inode(struct inode *inode, dev_t rdev)
 		inode->i_op = &minix_symlink_inode_operations;
 		inode_nohighmem(inode);
 		inode->i_mapping->a_ops = &minix_aops;
-	} else
+	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+		   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
 		init_special_inode(inode, inode->i_mode, rdev);
+	} else {
+		printk(KERN_DEBUG "MINIX-fs: Invalid file type 0%04o for inode %lu.\n",
+		       inode->i_mode, inode->i_ino);
+		make_bad_inode(inode);
+	}
 }
 
 /*
diff --git a/fs/namei.c b/fs/namei.c
index 6795600c5738a..1eb20cb5e58f9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1388,6 +1388,10 @@ static int follow_automount(struct path *path, int *count, unsigned lookup_flags
 	    dentry->d_inode)
 		return -EISDIR;
+	/* No need to trigger automounts if mountpoint crossing is disabled. */
+	if (lookup_flags & LOOKUP_NO_XDEV)
+		return -EXDEV;
+
 	if (count && (*count)++ >= MAXSYMLINKS)
 		return -ELOOP;
 
@@ -1411,6 +1415,10 @@ static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped,
 		/* Allow the filesystem to manage the transit without i_mutex
 		 * being held. */
 		if (flags & DCACHE_MANAGE_TRANSIT) {
+			if (lookup_flags & LOOKUP_NO_XDEV) {
+				ret = -EXDEV;
+				break;
+			}
 			ret = path->dentry->d_op->d_manage(path, false);
 			flags = smp_load_acquire(&path->dentry->d_flags);
 			if (ret < 0)
diff --git a/fs/namespace.c b/fs/namespace.c
index c8519302f5824..cc4926d53e7de 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -65,6 +65,15 @@ static int __init set_mphash_entries(char *str)
 }
 __setup("mphash_entries=", set_mphash_entries);
 
+static char * __initdata initramfs_options;
+static int __init initramfs_options_setup(char *str)
+{
+	initramfs_options = str;
+	return 1;
+}
+
+__setup("initramfs_options=", initramfs_options_setup);
+
 static u64 event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
@@ -144,7 +153,7 @@ static void mnt_ns_release(struct mnt_namespace *ns)
 	lockdep_assert_not_held(&mnt_ns_tree_lock);
 
 	/* keep alive for {list,stat}mount() */
-	if (refcount_dec_and_test(&ns->passive)) {
+	if (ns && refcount_dec_and_test(&ns->passive)) {
 		put_user_ns(ns->user_ns);
 		kfree(ns);
 	}
@@ -5200,7 +5209,6 @@ static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
 static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
 			struct mnt_namespace *ns)
 {
-	struct path root __free(path_put) = {};
 	struct mount *m;
 	int err;
 
@@ -5212,7 +5220,7 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
 	if (!s->mnt)
 		return -ENOENT;
 
-	err = grab_requested_root(ns, &root);
+	err = grab_requested_root(ns, &s->root);
 	if (err)
 		return err;
 
@@ -5221,15 +5229,13 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
 	 * mounts to show users.
*/ m = real_mount(s->mnt); - if (!is_path_reachable(m, m->mnt.mnt_root, &root) && + if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) && !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) return -EPERM; err = security_sb_statfs(s->mnt->mnt_root); if (err) return err; - - s->root = root; if (s->mask & STATMOUNT_SB_BASIC) statmount_sb_basic(s); @@ -5406,28 +5412,40 @@ SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req, if (!ret) ret = copy_statmount_to_user(ks); kvfree(ks->seq.buf); + path_put(&ks->root); if (retry_statmount(ret, &seq_size)) goto retry; return ret; } -static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id, - u64 last_mnt_id, u64 *mnt_ids, size_t nr_mnt_ids, - bool reverse) +struct klistmount { + u64 last_mnt_id; + u64 mnt_parent_id; + u64 *kmnt_ids; + u32 nr_mnt_ids; + struct mnt_namespace *ns; + struct path root; +}; + +static ssize_t do_listmount(struct klistmount *kls, bool reverse) { - struct path root __free(path_put) = {}; + struct mnt_namespace *ns = kls->ns; + u64 mnt_parent_id = kls->mnt_parent_id; + u64 last_mnt_id = kls->last_mnt_id; + u64 *mnt_ids = kls->kmnt_ids; + size_t nr_mnt_ids = kls->nr_mnt_ids; struct path orig; struct mount *r, *first; ssize_t ret; rwsem_assert_held(&namespace_sem); - ret = grab_requested_root(ns, &root); + ret = grab_requested_root(ns, &kls->root); if (ret) return ret; if (mnt_parent_id == LSMT_ROOT) { - orig = root; + orig = kls->root; } else { orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns); if (!orig.mnt) @@ -5439,7 +5457,7 @@ static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id, * Don't trigger audit denials. We just want to determine what * mounts to show users. */ - if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) && + if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &kls->root) && !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) return -EPERM; @@ -5472,14 +5490,45 @@ static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id, return ret; } +static void __free_klistmount_free(const struct klistmount *kls) +{ + path_put(&kls->root); + kvfree(kls->kmnt_ids); + mnt_ns_release(kls->ns); +} + +static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *kreq, + size_t nr_mnt_ids) +{ + + u64 last_mnt_id = kreq->param; + + /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */ + if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET) + return -EINVAL; + + kls->last_mnt_id = last_mnt_id; + + kls->nr_mnt_ids = nr_mnt_ids; + kls->kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kls->kmnt_ids), + GFP_KERNEL_ACCOUNT); + if (!kls->kmnt_ids) + return -ENOMEM; + + kls->ns = grab_requested_mnt_ns(kreq); + if (!kls->ns) + return -ENOENT; + + kls->mnt_parent_id = kreq->mnt_id; + return 0; +} + SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags) { - u64 *kmnt_ids __free(kvfree) = NULL; + struct klistmount kls __free(klistmount_free) = {}; const size_t maxcount = 1000000; - struct mnt_namespace *ns __free(mnt_ns_release) = NULL; struct mnt_id_req kreq; - u64 last_mnt_id; ssize_t ret; if (flags & ~LISTMOUNT_REVERSE) @@ -5500,31 +5549,20 @@ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, if (ret) return ret; - last_mnt_id = kreq.param; - /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. 
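+	 * Callers paginate by passing back the last mount id they saw in
+	 * kreq->param. A sketch of the userspace side, assuming a
+	 * hypothetical ids[] buffer and eliding error handling:
+	 *
+	 *	struct mnt_id_req req = {
+	 *		.size	= MNT_ID_REQ_SIZE_VER0,
+	 *		.mnt_id	= LSMT_ROOT,
+	 *		.param	= last_seen_id,	/* 0 on the first call */
+	 *	};
+	 *	n = syscall(__NR_listmount, &req, ids, 64, 0);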
*/ - if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET) - return -EINVAL; - - kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kmnt_ids), - GFP_KERNEL_ACCOUNT); - if (!kmnt_ids) - return -ENOMEM; - - ns = grab_requested_mnt_ns(&kreq); - if (!ns) - return -ENOENT; + ret = prepare_klistmount(&kls, &kreq, nr_mnt_ids); + if (ret) + return ret; - if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) && - !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) + if (kreq.mnt_ns_id && (kls.ns != current->nsproxy->mnt_ns) && + !ns_capable_noaudit(kls.ns->user_ns, CAP_SYS_ADMIN)) return -ENOENT; scoped_guard(rwsem_read, &namespace_sem) - ret = do_listmount(ns, kreq.mnt_id, last_mnt_id, kmnt_ids, - nr_mnt_ids, (flags & LISTMOUNT_REVERSE)); + ret = do_listmount(&kls, (flags & LISTMOUNT_REVERSE)); if (ret <= 0) return ret; - if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids))) + if (copy_to_user(mnt_ids, kls.kmnt_ids, ret * sizeof(*mnt_ids))) return -EFAULT; return ret; @@ -5537,7 +5575,7 @@ static void __init init_mount_tree(void) struct mnt_namespace *ns; struct path root; - mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL); + mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", initramfs_options); if (IS_ERR(mnt)) panic("Can't create rootfs"); diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c index 896d1d4219ed9..be77a137cc871 100644 --- a/fs/netfs/buffered_write.c +++ b/fs/netfs/buffered_write.c @@ -340,7 +340,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, folio_put(folio); ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1); if (ret < 0) - goto error_folio_unlock; + goto out; continue; copied: diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 6cf92498a5ac6..86bdc7d23fb90 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -211,10 +211,6 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion) return ERR_PTR(-ENOMEM); } cb_info->serv = serv; - /* As there is only one thread we need to over-ride the - * default maximum of 80 connections - */ - serv->sv_maxconn = 1024; dprintk("nfs_callback_create_svc: service created\n"); return serv; } diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index fdeb0b34a3d39..4254ba3ee7c57 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -984,6 +984,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp) nfs_put_client(cps.clp); goto out_invalidcred; } + svc_xprt_set_valid(rqstp->rq_xprt); } cps.minorversion = hdr_arg.minorversion; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ea92483d5e71e..c21e63027fc0e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -9393,7 +9393,7 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args goto out; if (rcvd->max_rqst_sz > sent->max_rqst_sz) return -EINVAL; - if (rcvd->max_resp_sz < sent->max_resp_sz) + if (rcvd->max_resp_sz > sent->max_resp_sz) return -EINVAL; if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) return -EINVAL; diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index 08a20e5bcf7fe..0822d8a119c6f 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -118,7 +118,6 @@ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp, struct iomap *iomaps, int nr_iomaps) { struct timespec64 mtime = inode_get_mtime(inode); - loff_t new_size = lcp->lc_last_wr + 1; struct iattr iattr = { .ia_valid = 0 }; int error; @@ -128,9 +127,9 @@ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit 
*lcp, iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME; iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime; - if (new_size > i_size_read(inode)) { + if (lcp->lc_size_chg) { iattr.ia_valid |= ATTR_SIZE; - iattr.ia_size = new_size; + iattr.ia_size = lcp->lc_newsize; } error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps, @@ -173,16 +172,20 @@ nfsd4_block_proc_getdeviceinfo(struct super_block *sb, } static __be32 -nfsd4_block_proc_layoutcommit(struct inode *inode, +nfsd4_block_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp, struct nfsd4_layoutcommit *lcp) { struct iomap *iomaps; int nr_iomaps; + __be32 nfserr; - nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout, - lcp->lc_up_len, &iomaps, i_blocksize(inode)); - if (nr_iomaps < 0) - return nfserrno(nr_iomaps); + rqstp->rq_arg = lcp->lc_up_layout; + svcxdr_init_decode(rqstp); + + nfserr = nfsd4_block_decode_layoutupdate(&rqstp->rq_arg_stream, + &iomaps, &nr_iomaps, i_blocksize(inode)); + if (nfserr != nfs_ok) + return nfserr; return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps); } @@ -311,16 +314,20 @@ nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb, return nfserrno(nfsd4_block_get_device_info_scsi(sb, clp, gdp)); } static __be32 -nfsd4_scsi_proc_layoutcommit(struct inode *inode, +nfsd4_scsi_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp, struct nfsd4_layoutcommit *lcp) { struct iomap *iomaps; int nr_iomaps; + __be32 nfserr; + + rqstp->rq_arg = lcp->lc_up_layout; + svcxdr_init_decode(rqstp); - nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout, - lcp->lc_up_len, &iomaps, i_blocksize(inode)); - if (nr_iomaps < 0) - return nfserrno(nr_iomaps); + nfserr = nfsd4_scsi_decode_layoutupdate(&rqstp->rq_arg_stream, + &iomaps, &nr_iomaps, i_blocksize(inode)); + if (nfserr != nfs_ok) + return nfserr; return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps); } diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c index ce78f74715eea..e50afe3407371 100644 --- a/fs/nfsd/blocklayoutxdr.c +++ b/fs/nfsd/blocklayoutxdr.c @@ -29,8 +29,7 @@ nfsd4_block_encode_layoutget(struct xdr_stream *xdr, *p++ = cpu_to_be32(len); *p++ = cpu_to_be32(1); /* we always return a single extent */ - p = xdr_encode_opaque_fixed(p, &b->vol_id, - sizeof(struct nfsd4_deviceid)); + p = svcxdr_encode_deviceid4(p, &b->vol_id); p = xdr_encode_hyper(p, b->foff); p = xdr_encode_hyper(p, b->len); p = xdr_encode_hyper(p, b->soff); @@ -112,64 +111,86 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr, return 0; } -int -nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, - u32 block_size) +/** + * nfsd4_block_decode_layoutupdate - decode the block layout extent array + * @xdr: subbuf set to the encoded array + * @iomapp: pointer to store the decoded extent array + * @nr_iomapsp: pointer to store the number of extents + * @block_size: alignment of extent offset and length + * + * This function decodes the opaque field of the layoutupdate4 structure + * in a layoutcommit request for the block layout driver. The field is + * actually an array of extents sent by the client. It also checks that + * the file offset, storage offset and length of each extent are aligned + * by @block_size. 
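+ *
+ * For reference, each extent in that array is a fixed-size XDR record
+ * (sizes in octets, inferred from the decode sequence below):
+ *
+ *	deviceid4 (16) | file offset (8) | length (8) |
+ *	storage offset (8) | extent state (4)
+ *
+ * which is why the opaque body must measure exactly one 4-octet count
+ * plus nr_iomaps * PNFS_BLOCK_EXTENT_SIZE.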
+ * + * Return values: + * %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid + * %nfserr_bad_xdr: The encoded array in @xdr is invalid + * %nfserr_inval: An unaligned extent found + * %nfserr_delay: Failed to allocate memory for @iomapp + */ +__be32 +nfsd4_block_decode_layoutupdate(struct xdr_stream *xdr, struct iomap **iomapp, + int *nr_iomapsp, u32 block_size) { struct iomap *iomaps; - u32 nr_iomaps, i; + u32 nr_iomaps, expected, len, i; + __be32 nfserr; - if (len < sizeof(u32)) { - dprintk("%s: extent array too small: %u\n", __func__, len); - return -EINVAL; - } - len -= sizeof(u32); - if (len % PNFS_BLOCK_EXTENT_SIZE) { - dprintk("%s: extent array invalid: %u\n", __func__, len); - return -EINVAL; - } + if (xdr_stream_decode_u32(xdr, &nr_iomaps)) + return nfserr_bad_xdr; - nr_iomaps = be32_to_cpup(p++); - if (nr_iomaps != len / PNFS_BLOCK_EXTENT_SIZE) { - dprintk("%s: extent array size mismatch: %u/%u\n", - __func__, len, nr_iomaps); - return -EINVAL; - } + len = sizeof(__be32) + xdr_stream_remaining(xdr); + expected = sizeof(__be32) + nr_iomaps * PNFS_BLOCK_EXTENT_SIZE; + if (len != expected) + return nfserr_bad_xdr; iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL); - if (!iomaps) { - dprintk("%s: failed to allocate extent array\n", __func__); - return -ENOMEM; - } + if (!iomaps) + return nfserr_delay; for (i = 0; i < nr_iomaps; i++) { struct pnfs_block_extent bex; - memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid)); - p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid)); + if (nfsd4_decode_deviceid4(xdr, &bex.vol_id)) { + nfserr = nfserr_bad_xdr; + goto fail; + } - p = xdr_decode_hyper(p, &bex.foff); + if (xdr_stream_decode_u64(xdr, &bex.foff)) { + nfserr = nfserr_bad_xdr; + goto fail; + } if (bex.foff & (block_size - 1)) { - dprintk("%s: unaligned offset 0x%llx\n", - __func__, bex.foff); + nfserr = nfserr_inval; + goto fail; + } + + if (xdr_stream_decode_u64(xdr, &bex.len)) { + nfserr = nfserr_bad_xdr; goto fail; } - p = xdr_decode_hyper(p, &bex.len); if (bex.len & (block_size - 1)) { - dprintk("%s: unaligned length 0x%llx\n", - __func__, bex.foff); + nfserr = nfserr_inval; + goto fail; + } + + if (xdr_stream_decode_u64(xdr, &bex.soff)) { + nfserr = nfserr_bad_xdr; goto fail; } - p = xdr_decode_hyper(p, &bex.soff); if (bex.soff & (block_size - 1)) { - dprintk("%s: unaligned disk offset 0x%llx\n", - __func__, bex.soff); + nfserr = nfserr_inval; + goto fail; + } + + if (xdr_stream_decode_u32(xdr, &bex.es)) { + nfserr = nfserr_bad_xdr; goto fail; } - bex.es = be32_to_cpup(p++); if (bex.es != PNFS_BLOCK_READWRITE_DATA) { - dprintk("%s: incorrect extent state %d\n", - __func__, bex.es); + nfserr = nfserr_inval; goto fail; } @@ -178,59 +199,79 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, } *iomapp = iomaps; - return nr_iomaps; + *nr_iomapsp = nr_iomaps; + return nfs_ok; fail: kfree(iomaps); - return -EINVAL; + return nfserr; } -int -nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, - u32 block_size) +/** + * nfsd4_scsi_decode_layoutupdate - decode the scsi layout extent array + * @xdr: subbuf set to the encoded array + * @iomapp: pointer to store the decoded extent array + * @nr_iomapsp: pointer to store the number of extents + * @block_size: alignment of extent offset and length + * + * This function decodes the opaque field of the layoutupdate4 structure + * in a layoutcommit request for the scsi layout driver. The field is + * actually an array of extents sent by the client. 
It also checks that + * the offset and length of each extent are aligned by @block_size. + * + * Return values: + * %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid + * %nfserr_bad_xdr: The encoded array in @xdr is invalid + * %nfserr_inval: An unaligned extent found + * %nfserr_delay: Failed to allocate memory for @iomapp + */ +__be32 +nfsd4_scsi_decode_layoutupdate(struct xdr_stream *xdr, struct iomap **iomapp, + int *nr_iomapsp, u32 block_size) { struct iomap *iomaps; - u32 nr_iomaps, expected, i; + u32 nr_iomaps, expected, len, i; + __be32 nfserr; - if (len < sizeof(u32)) { - dprintk("%s: extent array too small: %u\n", __func__, len); - return -EINVAL; - } + if (xdr_stream_decode_u32(xdr, &nr_iomaps)) + return nfserr_bad_xdr; - nr_iomaps = be32_to_cpup(p++); + len = sizeof(__be32) + xdr_stream_remaining(xdr); expected = sizeof(__be32) + nr_iomaps * PNFS_SCSI_RANGE_SIZE; - if (len != expected) { - dprintk("%s: extent array size mismatch: %u/%u\n", - __func__, len, expected); - return -EINVAL; - } + if (len != expected) + return nfserr_bad_xdr; iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL); - if (!iomaps) { - dprintk("%s: failed to allocate extent array\n", __func__); - return -ENOMEM; - } + if (!iomaps) + return nfserr_delay; for (i = 0; i < nr_iomaps; i++) { u64 val; - p = xdr_decode_hyper(p, &val); + if (xdr_stream_decode_u64(xdr, &val)) { + nfserr = nfserr_bad_xdr; + goto fail; + } if (val & (block_size - 1)) { - dprintk("%s: unaligned offset 0x%llx\n", __func__, val); + nfserr = nfserr_inval; goto fail; } iomaps[i].offset = val; - p = xdr_decode_hyper(p, &val); + if (xdr_stream_decode_u64(xdr, &val)) { + nfserr = nfserr_bad_xdr; + goto fail; + } if (val & (block_size - 1)) { - dprintk("%s: unaligned length 0x%llx\n", __func__, val); + nfserr = nfserr_inval; goto fail; } iomaps[i].length = val; } *iomapp = iomaps; - return nr_iomaps; + *nr_iomapsp = nr_iomaps; + return nfs_ok; fail: kfree(iomaps); - return -EINVAL; + return nfserr; } diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h index 4e28ac8f11279..7d25ef689671f 100644 --- a/fs/nfsd/blocklayoutxdr.h +++ b/fs/nfsd/blocklayoutxdr.h @@ -54,9 +54,9 @@ __be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr, const struct nfsd4_getdeviceinfo *gdp); __be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr, const struct nfsd4_layoutget *lgp); -int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, - u32 block_size); -int nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, - u32 block_size); +__be32 nfsd4_block_decode_layoutupdate(struct xdr_stream *xdr, + struct iomap **iomapp, int *nr_iomapsp, u32 block_size); +__be32 nfsd4_scsi_decode_layoutupdate(struct xdr_stream *xdr, + struct iomap **iomapp, int *nr_iomapsp, u32 block_size); #endif /* _NFSD_BLOCKLAYOUTXDR_H */ diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 49aede376d866..3216416ca0426 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1075,47 +1075,62 @@ static struct svc_export *exp_find(struct cache_detail *cd, } /** - * check_nfsd_access - check if access to export is allowed. + * check_xprtsec_policy - check if access to export is allowed by the + * xprtsec policy * @exp: svc_export that is being accessed. - * @rqstp: svc_rqst attempting to access @exp (will be NULL for LOCALIO). + * @rqstp: svc_rqst attempting to access @exp. + * + * Helper function for check_nfsd_access(). 
Note that callers should be + * using check_nfsd_access() instead of calling this function directly. The + * one exception is __fh_verify() since it has logic that may result in one + * or both of the helpers being skipped. * * Return values: * %nfs_ok if access is granted, or * %nfserr_wrongsec if access is denied */ -__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp) +__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp) { - struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors; - struct svc_xprt *xprt; - - /* - * If rqstp is NULL, this is a LOCALIO request which will only - * ever use a filehandle/credential pair for which access has - * been affirmed (by ACCESS or OPEN NFS requests) over the - * wire. So there is no need for further checks here. - */ - if (!rqstp) - return nfs_ok; - - xprt = rqstp->rq_xprt; + struct svc_xprt *xprt = rqstp->rq_xprt; if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) { if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags)) - goto ok; + return nfs_ok; } if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) { if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) && !test_bit(XPT_PEER_AUTH, &xprt->xpt_flags)) - goto ok; + return nfs_ok; } if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) { if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) && test_bit(XPT_PEER_AUTH, &xprt->xpt_flags)) - goto ok; + return nfs_ok; } - goto denied; + return nfserr_wrongsec; +} + +/** + * check_security_flavor - check if access to export is allowed by the + * security flavor + * @exp: svc_export that is being accessed. + * @rqstp: svc_rqst attempting to access @exp. + * @may_bypass_gss: reduce strictness of authorization check + * + * Helper function for check_nfsd_access(). Note that callers should be + * using check_nfsd_access() instead of calling this function directly. The + * one exception is __fh_verify() since it has logic that may result in one + * or both of the helpers being skipped. + * + * Return values: + * %nfs_ok if access is granted, or + * %nfserr_wrongsec if access is denied + */ +__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp, + bool may_bypass_gss) +{ + struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors; -ok: /* legacy gss-only clients are always OK: */ if (exp->ex_client == rqstp->rq_gssclient) return nfs_ok; @@ -1140,10 +1155,47 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp) if (nfsd4_spo_must_allow(rqstp)) return nfs_ok; -denied: + /* Some calls may be processed without authentication + * on GSS exports. For example NFS2/3 calls on root + * directory, see section 2.3.2 of rfc 2623. + * For "may_bypass_gss" check that export has really + * enabled some flavor with authentication (GSS or any + * other) and also check that the used auth flavor is + * without authentication (none or sys). + */ + if (may_bypass_gss && ( + rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL || + rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)) { + for (f = exp->ex_flavors; f < end; f++) { + if (f->pseudoflavor >= RPC_AUTH_DES) + return 0; + } + } + return nfserr_wrongsec; } +/** + * check_nfsd_access - check if access to export is allowed. + * @exp: svc_export that is being accessed. + * @rqstp: svc_rqst attempting to access @exp. 
+ * @may_bypass_gss: reduce strictness of authorization check + * + * Return values: + * %nfs_ok if access is granted, or + * %nfserr_wrongsec if access is denied + */ +__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp, + bool may_bypass_gss) +{ + __be32 status; + + status = check_xprtsec_policy(exp, rqstp); + if (status != nfs_ok) + return status; + return check_security_flavor(exp, rqstp, may_bypass_gss); +} + /* * Uses rq_client and rq_gssclient to find an export; uses rq_client (an * auth_unix client) if it's available and has secinfo information; diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h index 3794ae253a701..0f4d34943e2c0 100644 --- a/fs/nfsd/export.h +++ b/fs/nfsd/export.h @@ -101,7 +101,11 @@ struct svc_expkey { struct svc_cred; int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp); -__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp); +__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp); +__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp, + bool may_bypass_gss); +__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp, + bool may_bypass_gss); /* * Function declarations diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c index 3ca5304440ff0..3c4419da5e24c 100644 --- a/fs/nfsd/flexfilelayout.c +++ b/fs/nfsd/flexfilelayout.c @@ -125,6 +125,13 @@ nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp, return 0; } +static __be32 +nfsd4_ff_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp, + struct nfsd4_layoutcommit *lcp) +{ + return nfs_ok; +} + const struct nfsd4_layout_ops ff_layout_ops = { .notify_types = NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE, @@ -133,4 +140,5 @@ const struct nfsd4_layout_ops ff_layout_ops = { .encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo, .proc_layoutget = nfsd4_ff_proc_layoutget, .encode_layoutget = nfsd4_ff_encode_layoutget, + .proc_layoutcommit = nfsd4_ff_proc_layoutcommit, }; diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c index aeb71c10ff1b9..f9f7e38cba13f 100644 --- a/fs/nfsd/flexfilelayoutxdr.c +++ b/fs/nfsd/flexfilelayoutxdr.c @@ -54,8 +54,7 @@ nfsd4_ff_encode_layoutget(struct xdr_stream *xdr, *p++ = cpu_to_be32(1); /* single mirror */ *p++ = cpu_to_be32(1); /* single data server */ - p = xdr_encode_opaque_fixed(p, &fl->deviceid, - sizeof(struct nfsd4_deviceid)); + p = svcxdr_encode_deviceid4(p, &fl->deviceid); *p++ = cpu_to_be32(1); /* efficiency */ diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c index 46a7f9b813e52..6b042218668b8 100644 --- a/fs/nfsd/lockd.c +++ b/fs/nfsd/lockd.c @@ -38,16 +38,40 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp, memcpy(&fh.fh_handle.fh_raw, f->data, f->size); fh.fh_export = NULL; + /* + * Allow BYPASS_GSS as some client implementations use AUTH_SYS + * for NLM even when GSS is used for NFS. + * Allow OWNER_OVERRIDE as permission might have been changed + * after the file was opened. + * Pass MAY_NLM so that authentication can be completely bypassed + * if NFSEXP_NOAUTHNLM is set. Some older clients use AUTH_NULL + * for NLM requests. + */ access = (mode == O_WRONLY) ? 
NFSD_MAY_WRITE : NFSD_MAY_READ; - access |= NFSD_MAY_LOCK; + access |= NFSD_MAY_NLM | NFSD_MAY_OWNER_OVERRIDE | NFSD_MAY_BYPASS_GSS; nfserr = nfsd_open(rqstp, &fh, S_IFREG, access, filp); fh_put(&fh); - /* We return nlm error codes as nlm doesn't know + /* We return nlm error codes as nlm doesn't know * about nfsd, but nfsd does know about nlm.. */ switch (nfserr) { case nfs_ok: return 0; + case nfserr_jukebox: + /* this error can indicate that a delegation conflicts + * with an NLM lock request. Options are: + * (1) For now, drop this request and make the client + * retry. When the delegation is returned, the client's + * lock retry will complete. + * (2) NLM4_DENIED, per the "spec", signals to the client + * that the lock is unavailable now but can be retried. + * The Linux client implementation does not retry, though: + * it treats NLM4_DENIED the same as NLM4_FAILED and fails + * the request. + * (3) In the future, treat this as a blocked lock and try + * to issue a callback when the delegation is returned, + * though there might not be a proper lock request to block + * on. + */ + fallthrough; case nfserr_dropit: return nlm_drop_reply; case nfserr_stale: diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 26f7b34d1a030..a05a45bb19781 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -129,8 +129,8 @@ struct nfsd_net { unsigned char writeverf[8]; /* - * Max number of connections this nfsd container will allow. Defaults - * to '0' which is means that it bases this on the number of threads. + * Max number of non-validated connections this nfsd container + * will allow. Defaults to '0', which gets mapped to 64. */ unsigned int max_connections; diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index fbfddd3c4c943..fc5e82eddaa1a 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -120,7 +120,6 @@ nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp, id->fsid_idx = fhp->fh_export->ex_devid_map->idx; id->generation = device_generation; - id->pad = 0; return 0; } diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 02c9f3b312a0e..6040de0923f85 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1372,7 +1372,7 @@ static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr, return 0; } if (work) { - strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr) - 1); + strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr)); refcount_set(&work->nsui_refcnt, 2); work->nsui_busy = true; list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list); @@ -2362,7 +2362,6 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp, const struct nfsd4_layout_seg *seg = &lcp->lc_seg; struct svc_fh *current_fh = &cstate->current_fh; const struct nfsd4_layout_ops *ops; - loff_t new_size = lcp->lc_last_wr + 1; struct inode *inode; struct nfs4_layout_stateid *ls; __be32 nfserr; @@ -2378,18 +2377,20 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp, goto out; inode = d_inode(current_fh->fh_dentry); - nfserr = nfserr_inval; - if (new_size <= seg->offset) { - dprintk("pnfsd: last write before layout segment\n"); - goto out; - } - if (new_size > seg->offset + seg->length) { - dprintk("pnfsd: last write beyond layout segment\n"); - goto out; - } - if (!lcp->lc_newoffset && new_size > i_size_read(inode)) { - dprintk("pnfsd: layoutcommit beyond EOF\n"); - goto out; + lcp->lc_size_chg = false; + if (lcp->lc_newoffset) { + loff_t new_size = lcp->lc_last_wr + 1; + + nfserr = nfserr_inval; + if (new_size <= seg->offset) + goto out; + if (new_size > seg->offset + seg->length) + goto out; + + if (new_size > 
i_size_read(inode)) { + lcp->lc_size_chg = true; + lcp->lc_newsize = new_size; + } } nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid, @@ -2406,14 +2407,7 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp, /* LAYOUTCOMMIT does not require any serialization */ mutex_unlock(&ls->ls_mutex); - if (new_size > i_size_read(inode)) { - lcp->lc_size_chg = true; - lcp->lc_newsize = new_size; - } else { - lcp->lc_size_chg = false; - } - - nfserr = ops->proc_layoutcommit(inode, lcp); + nfserr = ops->proc_layoutcommit(inode, rqstp, lcp); nfs4_put_stid(&ls->ls_stid); out: return nfserr; @@ -2799,7 +2793,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp) if (current_fh->fh_export && need_wrongsec_check(rqstp)) - op->status = check_nfsd_access(current_fh->fh_export, rqstp); + op->status = check_nfsd_access(current_fh->fh_export, rqstp, false); } encode_op: if (op->status == nfserr_replay_me) { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index bcb44400e2439..7b0fabf8c657a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -7998,11 +7998,9 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if (check_lock_length(lock->lk_offset, lock->lk_length)) return nfserr_inval; - if ((status = fh_verify(rqstp, &cstate->current_fh, - S_IFREG, NFSD_MAY_LOCK))) { - dprintk("NFSD: nfsd4_lock: permission denied!\n"); + status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); + if (status != nfs_ok) return status; - } sb = cstate->current_fh.fh_dentry->d_sb; if (lock->lk_is_new) { diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 6edeb3bdf81b5..66383eeeed15a 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -566,23 +566,13 @@ nfsd4_decode_state_owner4(struct nfsd4_compoundargs *argp, } #ifdef CONFIG_NFSD_PNFS -static __be32 -nfsd4_decode_deviceid4(struct nfsd4_compoundargs *argp, - struct nfsd4_deviceid *devid) -{ - __be32 *p; - - p = xdr_inline_decode(argp->xdr, NFS4_DEVICEID4_SIZE); - if (!p) - return nfserr_bad_xdr; - memcpy(devid, p, sizeof(*devid)); - return nfs_ok; -} static __be32 nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp, struct nfsd4_layoutcommit *lcp) { + u32 len; + if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_layout_type) < 0) return nfserr_bad_xdr; if (lcp->lc_layout_type < LAYOUT_NFSV4_1_FILES) @@ -590,13 +580,10 @@ nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp, if (lcp->lc_layout_type >= LAYOUT_TYPE_MAX) return nfserr_bad_xdr; - if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_up_len) < 0) + if (xdr_stream_decode_u32(argp->xdr, &len) < 0) + return nfserr_bad_xdr; + if (!xdr_stream_subsegment(argp->xdr, &lcp->lc_up_layout, len)) return nfserr_bad_xdr; - if (lcp->lc_up_len > 0) { - lcp->lc_up_layout = xdr_inline_decode(argp->xdr, lcp->lc_up_len); - if (!lcp->lc_up_layout) - return nfserr_bad_xdr; - } return nfs_ok; } @@ -1762,7 +1749,7 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp, __be32 status; memset(gdev, 0, sizeof(*gdev)); - status = nfsd4_decode_deviceid4(argp, &gdev->gd_devid); + status = nfsd4_decode_deviceid4(argp->xdr, &gdev->gd_devid); if (status) return status; if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_layout_type) < 0) @@ -3784,7 +3771,7 @@ nfsd4_encode_entry4_fattr(struct nfsd4_readdir *cd, const char *name, nfserr = nfserrno(err); goto out_put; } - nfserr = check_nfsd_access(exp, cd->rd_rqstp); + nfserr = check_nfsd_access(exp, cd->rd_rqstp, false); if (nfserr) goto out_put; diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 4b56ba1e8e48d..ae435444e8b3b 100644 --- 
a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -286,6 +286,7 @@ void nfsd_lockd_shutdown(void); #define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN) #define nfserr_locked cpu_to_be32(NFSERR_LOCKED) #define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC) +#define nfserr_delay cpu_to_be32(NFS4ERR_DELAY) #define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE) #define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT) #define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST) diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 96e19c50a5d7e..854dcdc36b2ce 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -320,6 +320,7 @@ __fh_verify(struct svc_rqst *rqstp, { struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct svc_export *exp = NULL; + bool may_bypass_gss = false; struct dentry *dentry; __be32 error; @@ -363,12 +364,31 @@ __fh_verify(struct svc_rqst *rqstp, goto out; /* - * pseudoflavor restrictions are not enforced on NLM, - * which clients virtually always use auth_sys for, - * even while using RPCSEC_GSS for NFS. + * If rqstp is NULL, this is a LOCALIO request which will only + * ever use a filehandle/credential pair for which access has + * been affirmed (by ACCESS or OPEN NFS requests) over the + * wire. Skip both the xprtsec policy and the security flavor + * checks. */ - if (access & NFSD_MAY_LOCK || access & NFSD_MAY_BYPASS_GSS) - goto skip_pseudoflavor_check; + if (!rqstp) + goto check_permissions; + + if ((access & NFSD_MAY_NLM) && (exp->ex_flags & NFSEXP_NOAUTHNLM)) + /* NLM is allowed to fully bypass authentication */ + goto out; + + /* + * NLM is allowed to bypass the xprtsec policy check because lockd + * doesn't support xprtsec. + */ + if (!(access & NFSD_MAY_NLM)) { + error = check_xprtsec_policy(exp, rqstp); + if (error) + goto out; + } + + if (access & NFSD_MAY_BYPASS_GSS) + may_bypass_gss = true; /* * Clients may expect to be able to use auth_sys during mount, * even if they use gss for everything else; see section 2.3.2 @@ -376,13 +396,17 @@ __fh_verify(struct svc_rqst *rqstp, */ if (access & NFSD_MAY_BYPASS_GSS_ON_ROOT && exp->ex_path.dentry == dentry) - goto skip_pseudoflavor_check; + may_bypass_gss = true; - error = check_nfsd_access(exp, rqstp); + error = check_security_flavor(exp, rqstp, may_bypass_gss); if (error) goto out; -skip_pseudoflavor_check: + /* During LOCALIO call to fh_verify will be called with a NULL rqstp */ + if (rqstp) + svc_xprt_set_valid(rqstp->rq_xprt); + +check_permissions: /* Finally, check access permissions. 
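 * By this point the xprtsec policy and the security flavor have either
 * been validated (check_xprtsec_policy()/check_security_flavor() above)
 * or deliberately skipped for LOCALIO and NLM, so only the plain
 * permission check against the dentry remains.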
*/ error = nfsd_permission(cred, exp, dentry, access); out: diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h index 925817f669176..dfd411d1f363f 100644 --- a/fs/nfsd/pnfs.h +++ b/fs/nfsd/pnfs.h @@ -35,6 +35,7 @@ struct nfsd4_layout_ops { const struct nfsd4_layoutget *lgp); __be32 (*proc_layoutcommit)(struct inode *inode, + struct svc_rqst *rqstp, struct nfsd4_layoutcommit *lcp); void (*fence_client)(struct nfs4_layout_stateid *ls, diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h index b8470d4cbe99e..3448e444d4100 100644 --- a/fs/nfsd/trace.h +++ b/fs/nfsd/trace.h @@ -79,7 +79,7 @@ DEFINE_NFSD_XDR_ERR_EVENT(cant_encode); { NFSD_MAY_READ, "READ" }, \ { NFSD_MAY_SATTR, "SATTR" }, \ { NFSD_MAY_TRUNC, "TRUNC" }, \ - { NFSD_MAY_LOCK, "LOCK" }, \ + { NFSD_MAY_NLM, "NLM" }, \ { NFSD_MAY_OWNER_OVERRIDE, "OWNER_OVERRIDE" }, \ { NFSD_MAY_LOCAL_ACCESS, "LOCAL_ACCESS" }, \ { NFSD_MAY_BYPASS_GSS_ON_ROOT, "BYPASS_GSS_ON_ROOT" }, \ diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index ca29a5e1600fd..8c4f4e2f9cee0 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -321,7 +321,7 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name, err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry); if (err) return err; - err = check_nfsd_access(exp, rqstp); + err = check_nfsd_access(exp, rqstp, false); if (err) goto out; /* @@ -2519,7 +2519,7 @@ nfsd_permission(struct svc_cred *cred, struct svc_export *exp, (acc & NFSD_MAY_EXEC)? " exec" : "", (acc & NFSD_MAY_SATTR)? " sattr" : "", (acc & NFSD_MAY_TRUNC)? " trunc" : "", - (acc & NFSD_MAY_LOCK)? " lock" : "", + (acc & NFSD_MAY_NLM)? " nlm" : "", (acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "", inode->i_mode, IS_IMMUTABLE(inode)? " immut" : "", @@ -2544,16 +2544,6 @@ nfsd_permission(struct svc_cred *cred, struct svc_export *exp, if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode)) return nfserr_perm; - if (acc & NFSD_MAY_LOCK) { - /* If we cannot rely on authentication in NLM requests, - * just allow locks, otherwise require read permission, or - * ownership - */ - if (exp->ex_flags & NFSEXP_NOAUTHNLM) - return 0; - else - acc = NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE; - } /* * The file owner always gets access permission for accesses that * would normally be checked at open time. 
This is to make diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index 3ff1465225569..a61ada4fd9203 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -20,7 +20,7 @@ #define NFSD_MAY_READ 0x004 /* == MAY_READ */ #define NFSD_MAY_SATTR 0x008 #define NFSD_MAY_TRUNC 0x010 -#define NFSD_MAY_LOCK 0x020 +#define NFSD_MAY_NLM 0x020 /* request is from lockd */ #define NFSD_MAY_MASK 0x03f /* extra hints to permission and open routines: */ diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 2a21a7662e030..c75b295df206a 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -596,9 +596,43 @@ struct nfsd4_reclaim_complete { struct nfsd4_deviceid { u64 fsid_idx; u32 generation; - u32 pad; }; +static inline __be32 * +svcxdr_encode_deviceid4(__be32 *p, const struct nfsd4_deviceid *devid) +{ + __be64 *q = (__be64 *)p; + + *q = (__force __be64)devid->fsid_idx; + p += 2; + *p++ = (__force __be32)devid->generation; + *p++ = xdr_zero; + return p; +} + +static inline __be32 * +svcxdr_decode_deviceid4(__be32 *p, struct nfsd4_deviceid *devid) +{ + __be64 *q = (__be64 *)p; + + devid->fsid_idx = (__force u64)(*q); + p += 2; + devid->generation = (__force u32)(*p++); + p++; /* NFSD does not use the remaining octets */ + return p; +} + +static inline __be32 +nfsd4_decode_deviceid4(struct xdr_stream *xdr, struct nfsd4_deviceid *devid) +{ + __be32 *p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE); + + if (unlikely(!p)) + return nfserr_bad_xdr; + svcxdr_decode_deviceid4(p, devid); + return nfs_ok; +} + struct nfsd4_layout_seg { u32 iomode; u64 offset; @@ -631,8 +665,7 @@ struct nfsd4_layoutcommit { u64 lc_last_wr; /* request */ struct timespec64 lc_mtime; /* request */ u32 lc_layout_type; /* request */ - u32 lc_up_len; /* layout length */ - void *lc_up_layout; /* decoded by callback */ + struct xdr_buf lc_up_layout; /* decoded by callback */ bool lc_size_chg; /* response */ u64 lc_newsize; /* response */ }; diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c index e933f9c65d904..d2aa8985c0557 100644 --- a/fs/notify/fdinfo.c +++ b/fs/notify/fdinfo.c @@ -17,6 +17,7 @@ #include "fanotify/fanotify.h" #include "fdinfo.h" #include "fsnotify.h" +#include "../internal.h" #if defined(CONFIG_PROC_FS) @@ -46,7 +47,12 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode) size = f->handle_bytes >> 2; + if (!super_trylock_shared(inode->i_sb)) + return; + ret = exportfs_encode_fid(inode, (struct fid *)f->f_handle, &size); + up_read(&inode->i_sb->s_umount); + if ((ret == FILEID_INVALID) || (ret < 0)) return; diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c index cf4fe21a50399..29b585f443f3e 100644 --- a/fs/ntfs3/bitmap.c +++ b/fs/ntfs3/bitmap.c @@ -1399,6 +1399,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits) mark_buffer_dirty(bh); unlock_buffer(bh); /* err = sync_dirty_buffer(bh); */ + put_bh(bh); b0 = 0; bits -= op; diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index 1bf2a6593dec6..6d1bf890929d9 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -1508,6 +1508,16 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni, bmp_size = bmp_size_v = le32_to_cpu(bmp->res.data_size); } + /* + * Index blocks exist, but $BITMAP has zero valid bits. + * This implies an on-disk corruption and must be rejected. 
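+	 * Bailing out here keeps indx_add_allocate() from growing an
+	 * index whose $BITMAP bookkeeping is already inconsistent.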
+ */ + if (in->name == I30_NAME && + unlikely(bmp_size_v == 0 && indx->alloc_run.count)) { + err = -EINVAL; + goto out1; + } + bit = bmp_size << 3; } diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c index 48566dff0dc92..662add939da78 100644 --- a/fs/ntfs3/run.c +++ b/fs/ntfs3/run.c @@ -9,6 +9,7 @@ #include <linux/blkdev.h> #include <linux/fs.h> #include <linux/log2.h> +#include <linux/overflow.h> #include "debug.h" #include "ntfs.h" @@ -982,14 +983,18 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, if (!dlcn) return -EINVAL; - lcn = prev_lcn + dlcn; + + if (check_add_overflow(prev_lcn, dlcn, &lcn)) + return -EINVAL; prev_lcn = lcn; } else { /* The size of 'dlcn' can't be > 8. */ return -EINVAL; } - next_vcn = vcn64 + len; + if (check_add_overflow(vcn64, len, &next_vcn)) + return -EINVAL; + /* Check boundary. */ if (next_vcn > evcn + 1) return -EINVAL; @@ -1153,7 +1158,8 @@ int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn) return -EINVAL; run_buf += size_size + offset_size; - vcn64 += len; + if (check_add_overflow(vcn64, len, &vcn64)) + return -EINVAL; #ifndef CONFIG_NTFS3_64BIT_CLUSTER if (vcn64 > 0x100000000ull) diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index f9d6a4f9ca921..aa595cd1ab6fe 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -868,6 +868,11 @@ static int __ocfs2_move_extents_range(struct buffer_head *di_bh, mlog_errno(ret); goto out; } + /* + * Invalidate extent cache after moving/defragging to prevent + * stale cached data with outdated extent flags. + */ + ocfs2_extent_map_trunc(inode, cpos); context->clusters_moved += alloc_size; next: diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index 77edcd70f72c2..c5236b3ed168f 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -1018,6 +1018,7 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn) printk(KERN_ERR "ocfs2: Could not determine" " locking version\n"); user_cluster_disconnect(conn); + lc = NULL; goto out; } wait_event(lc->oc_wait, (atomic_read(&lc->oc_this_node) > 0)); diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 2257bf52fb2a4..8f5ad591d7625 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -2259,6 +2259,9 @@ static void pagemap_scan_backout_range(struct pagemap_scan_private *p, { struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index]; + if (!p->vec_buf) + return; + if (cur_buf->start != addr) cur_buf->end = addr; else diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 71c0ce31a4c4d..94825180385ab 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -163,6 +163,9 @@ static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; /* SLAB cache for dquot structures */ static struct kmem_cache *dquot_cachep; +/* Workqueue for quota_release_work */ +static struct workqueue_struct *quota_unbound_wq; + void register_quota_format(struct quota_format_type *fmt) { spin_lock(&dq_list_lock); @@ -882,7 +885,7 @@ void dqput(struct dquot *dquot) put_releasing_dquots(dquot); atomic_dec(&dquot->dq_count); spin_unlock(&dq_list_lock); - queue_delayed_work(system_unbound_wq, &quota_release_work, 1); + queue_delayed_work(quota_unbound_wq, &quota_release_work, 1); } EXPORT_SYMBOL(dqput); @@ -3042,6 +3045,11 @@ static int __init dquot_init(void) shrinker_register(dqcache_shrinker); + quota_unbound_wq = alloc_workqueue("quota_events_unbound", + WQ_UNBOUND | WQ_MEM_RECLAIM, WQ_MAX_ACTIVE); + if (!quota_unbound_wq) + panic("Cannot create quota_unbound_wq\n"); + return 0; } fs_initcall(dquot_init); diff --git a/fs/read_write.c 
b/fs/read_write.c index befec0b5c537a..46408bab92385 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1600,6 +1600,13 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, if (len == 0) return 0; + /* + * Make sure return value doesn't overflow in 32bit compat mode. Also + * limit the size for all cases except when calling ->copy_file_range(). + */ + if (splice || !file_out->f_op->copy_file_range || in_compat_syscall()) + len = min_t(size_t, MAX_RW_COUNT, len); + file_start_write(file_out); /* @@ -1613,9 +1620,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, len, flags); } else if (!splice && file_in->f_op->remap_file_range && samesb) { ret = file_in->f_op->remap_file_range(file_in, pos_in, - file_out, pos_out, - min_t(loff_t, MAX_RW_COUNT, len), - REMAP_FILE_CAN_SHORTEN); + file_out, pos_out, len, REMAP_FILE_CAN_SHORTEN); /* fallback to splice */ if (ret <= 0) splice = true; @@ -1648,8 +1653,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, * to splicing from input file, while file_start_write() is held on * the output file on a different sb. */ - ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out, - min_t(size_t, len, MAX_RW_COUNT), 0); + ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out, len, 0); done: if (ret > 0) { fsnotify_access(file_in); diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index b74637ae9085a..6a35e884b41fc 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -703,7 +703,7 @@ struct TCP_Server_Info { bool nosharesock; bool tcp_nodelay; bool terminate; - unsigned int credits; /* send no more requests at once */ + int credits; /* send no more requests at once */ unsigned int max_credits; /* can override large 32000 default at mnt */ unsigned int in_flight; /* number of requests on the wire to server */ unsigned int max_in_flight; /* max number of requests that were on wire */ diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index e06d02b68c538..4862a9518a321 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -2381,8 +2381,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, tcon = tlink_tcon(tlink); server = tcon->ses->server; - if (!server->ops->rename) - return -ENOSYS; + if (!server->ops->rename) { + rc = -ENOSYS; + goto do_rename_exit; + } /* try path-based rename first */ rc = server->ops->rename(xid, tcon, from_dentry, diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c index 499f791df7799..3f3f184f7fb97 100644 --- a/fs/smb/client/misc.c +++ b/fs/smb/client/misc.c @@ -913,6 +913,14 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size, char *data_end; struct dfs_referral_level_3 *ref; + if (rsp_size < sizeof(*rsp)) { + cifs_dbg(VFS | ONCE, + "%s: header is malformed (size is %u, must be %zu)\n", + __func__, rsp_size, sizeof(*rsp)); + rc = -EINVAL; + goto parse_DFS_referrals_exit; + } + *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals); if (*num_of_nodes < 1) { @@ -922,6 +930,15 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size, goto parse_DFS_referrals_exit; } + if (sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3) > rsp_size) { + cifs_dbg(VFS | ONCE, + "%s: malformed buffer (size is %u, must be at least %zu)\n", + __func__, rsp_size, + sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3)); + rc = -EINVAL; + goto parse_DFS_referrals_exit; + } + ref = (struct dfs_referral_level_3 *) &(rsp->referrals); if (ref->VersionNumber != cpu_to_le16(3)) { cifs_dbg(VFS, "Referrals of 
V%d version are not supported, should be V3\n", diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c index 0385a514f59e9..4670c3e451a75 100644 --- a/fs/smb/client/smb1ops.c +++ b/fs/smb/client/smb1ops.c @@ -671,14 +671,72 @@ static int cifs_query_path_info(const unsigned int xid, } #ifdef CONFIG_CIFS_XATTR + /* + * For non-symlink WSL reparse points it is required to fetch + * EA $LXMOD, whose S_DT part contains the mandatory file type. + */ + if (!rc && data->reparse_point) { + struct smb2_file_full_ea_info *ea; + u32 next = 0; + + ea = (struct smb2_file_full_ea_info *)data->wsl.eas; + do { + ea = (void *)((u8 *)ea + next); + next = le32_to_cpu(ea->next_entry_offset); + } while (next); + if (le16_to_cpu(ea->ea_value_length)) { + ea->next_entry_offset = cpu_to_le32(ALIGN(sizeof(*ea) + + ea->ea_name_length + 1 + + le16_to_cpu(ea->ea_value_length), 4)); + ea = (void *)((u8 *)ea + le32_to_cpu(ea->next_entry_offset)); + } + + rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_MODE, + &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1], + SMB2_WSL_XATTR_MODE_SIZE, cifs_sb); + if (rc == SMB2_WSL_XATTR_MODE_SIZE) { + ea->next_entry_offset = cpu_to_le32(0); + ea->flags = 0; + ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN; + ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_MODE_SIZE); + memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_MODE, SMB2_WSL_XATTR_NAME_LEN + 1); + data->wsl.eas_len += ALIGN(sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 + + SMB2_WSL_XATTR_MODE_SIZE, 4); + rc = 0; + } else if (rc >= 0) { + /* It is an error if EA $LXMOD has wrong size. */ + rc = -EINVAL; + } else { + /* + * In all other cases, ignore the error if fetching + * EA $LXMOD failed. It is needed only for + * non-symlink WSL reparse points, and wsl_to_fattr() + * handles the case when the EA is missing. + */ + rc = 0; + } + } + /* * For WSL CHR and BLK reparse points it is required to fetch * EA $LXDEV which contains major and minor device numbers. */ if (!rc && data->reparse_point) { struct smb2_file_full_ea_info *ea; + u32 next = 0; ea = (struct smb2_file_full_ea_info *)data->wsl.eas; + do { + ea = (void *)((u8 *)ea + next); + next = le32_to_cpu(ea->next_entry_offset); + } while (next); + if (le16_to_cpu(ea->ea_value_length)) { + ea->next_entry_offset = cpu_to_le32(ALIGN(sizeof(*ea) + + ea->ea_name_length + 1 + + le16_to_cpu(ea->ea_value_length), 4)); + ea = (void *)((u8 *)ea + le32_to_cpu(ea->next_entry_offset)); + } + rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_DEV, &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1], SMB2_WSL_XATTR_DEV_SIZE, cifs_sb); @@ -688,8 +746,8 @@ static int cifs_query_path_info(const unsigned int xid, ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN; ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_DEV_SIZE); memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_DEV, SMB2_WSL_XATTR_NAME_LEN + 1); - data->wsl.eas_len = sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 + - SMB2_WSL_XATTR_DEV_SIZE; + data->wsl.eas_len += ALIGN(sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 + + SMB2_WSL_XATTR_DEV_SIZE, 4); rc = 0; } else if (rc >= 0) { /* It is an error if EA $LXDEV has wrong size. 
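 * ($LXDEV carries the device major and minor numbers that become
 * the file's dev_t, so a value of unexpected size cannot simply
 * be ignored the way a missing $LXMOD is above.)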
*/ diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c index b51ccfb884394..cb049bc70e0cb 100644 --- a/fs/smb/client/smb2inode.c +++ b/fs/smb/client/smb2inode.c @@ -641,7 +641,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, tmp_rc = rc; for (i = 0; i < num_cmds; i++) { - char *buf = rsp_iov[i + i].iov_base; + char *buf = rsp_iov[i + 1].iov_base; if (buf && resp_buftype[i + 1] != CIFS_NO_BUFFER) rc = server->ops->map_error(buf, false); @@ -1220,31 +1220,33 @@ int smb2_set_file_info(struct inode *inode, const char *full_path, FILE_BASIC_INFO *buf, const unsigned int xid) { - struct cifs_open_parms oparms; + struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), }; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + struct cifsFileInfo *cfile = NULL; + struct cifs_open_parms oparms; struct tcon_link *tlink; struct cifs_tcon *tcon; - struct cifsFileInfo *cfile; - struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), }; - int rc; - - if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && - (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) && - (buf->Attributes == 0)) - return 0; /* would be a no op, no sense sending this */ + int rc = 0; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); + if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && + (buf->LastWriteTime == 0) && (buf->ChangeTime == 0)) { + if (buf->Attributes == 0) + goto out; /* would be a no op, no sense sending this */ + cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); + } + oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, ACL_NO_MODE); rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, &in_iov, &(int){SMB2_OP_SET_INFO}, 1, cfile, NULL, NULL, NULL); +out: cifs_put_tlink(tlink); return rc; } diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index ab911a9672465..35299967737f1 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -3134,8 +3134,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb, utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (!utf16_path) { rc = -ENOMEM; - free_xid(xid); - return ERR_PTR(rc); + goto put_tlink; } oparms = (struct cifs_open_parms) { @@ -3167,6 +3166,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb, SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); } +put_tlink: cifs_put_tlink(tlink); free_xid(xid); @@ -3207,8 +3207,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen, utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (!utf16_path) { rc = -ENOMEM; - free_xid(xid); - return rc; + goto put_tlink; } oparms = (struct cifs_open_parms) { @@ -3229,6 +3228,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen, SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); } +put_tlink: cifs_put_tlink(tlink); free_xid(xid); return rc; @@ -4223,7 +4223,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len, static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst, int num_rqst, const u8 *sig, u8 **iv, struct aead_request **req, struct sg_table *sgt, - unsigned int *num_sgs, size_t *sensitive_size) + unsigned int *num_sgs) { unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm); unsigned int iv_size = crypto_aead_ivsize(tfm); @@ -4240,9 +4240,8 @@ static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct 
smb_rqst len += req_size; len = ALIGN(len, __alignof__(struct scatterlist)); len += array_size(*num_sgs, sizeof(struct scatterlist)); - *sensitive_size = len; - p = kvzalloc(len, GFP_NOFS); + p = kzalloc(len, GFP_NOFS); if (!p) return ERR_PTR(-ENOMEM); @@ -4256,16 +4255,14 @@ static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst static void *smb2_get_aead_req(struct crypto_aead *tfm, struct smb_rqst *rqst, int num_rqst, const u8 *sig, u8 **iv, - struct aead_request **req, struct scatterlist **sgl, - size_t *sensitive_size) + struct aead_request **req, struct scatterlist **sgl) { struct sg_table sgtable = {}; unsigned int skip, num_sgs, i, j; ssize_t rc; void *p; - p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable, - &num_sgs, sensitive_size); + p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable, &num_sgs); if (IS_ERR(p)) return ERR_CAST(p); @@ -4354,7 +4351,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, DECLARE_CRYPTO_WAIT(wait); unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); void *creq; - size_t sensitive_size; rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key); if (rc) { @@ -4380,8 +4376,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, return rc; } - creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg, - &sensitive_size); + creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg); if (IS_ERR(creq)) return PTR_ERR(creq); @@ -4411,7 +4406,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, if (!rc && enc) memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); - kvfree_sensitive(creq, sensitive_size); + kfree_sensitive(creq); return rc; } @@ -4662,7 +4657,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, unsigned int pad_len; struct cifs_io_subrequest *rdata = mid->callback_data; struct smb2_hdr *shdr = (struct smb2_hdr *)buf; - int length; + size_t copied; bool use_rdma_mr = false; if (shdr->Command != SMB2_READ) { @@ -4775,10 +4770,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, } else if (buf_len >= data_offset + data_len) { /* read response payload is in buf */ WARN_ONCE(buffer, "read data can be either in buf or in buffer"); - length = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter); - if (length < 0) - return length; - rdata->got_bytes = data_len; + copied = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter); + if (copied == 0) + return -EIO; + rdata->got_bytes = copied; } else { /* read response payload cannot be in both buf and pages */ WARN_ONCE(1, "buf can not contain only a part of read data"); diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h index 3f07a612c05b4..8ccd57fd904bc 100644 --- a/fs/smb/server/ksmbd_netlink.h +++ b/fs/smb/server/ksmbd_netlink.h @@ -112,10 +112,11 @@ struct ksmbd_startup_request { __u32 smbd_max_io_size; /* smbd read write size */ __u32 max_connections; /* Number of maximum simultaneous connections */ __s8 bind_interfaces_only; - __s8 reserved[503]; /* Reserved room */ + __u32 max_ip_connections; /* Number of maximum connection per ip address */ + __s8 reserved[499]; /* Reserved room */ __u32 ifc_list_sz; /* interfaces list size */ __s8 ____payload[]; -}; +} __packed; #define KSMBD_STARTUP_CONFIG_INTERFACES(s) ((s)->____payload) diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c index 9dec4c2940bc0..00805aed0b07d 100644 --- 
a/fs/smb/server/mgmt/user_session.c +++ b/fs/smb/server/mgmt/user_session.c @@ -104,29 +104,32 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name) if (!entry) return -ENOMEM; - down_read(&sess->rpc_lock); entry->method = method; entry->id = id = ksmbd_ipc_id_alloc(); if (id < 0) goto free_entry; + + down_write(&sess->rpc_lock); old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP); - if (xa_is_err(old)) + if (xa_is_err(old)) { + up_write(&sess->rpc_lock); goto free_id; + } resp = ksmbd_rpc_open(sess, id); - if (!resp) - goto erase_xa; + if (!resp) { + xa_erase(&sess->rpc_handle_list, entry->id); + up_write(&sess->rpc_lock); + goto free_id; + } - up_read(&sess->rpc_lock); + up_write(&sess->rpc_lock); kvfree(resp); return id; -erase_xa: - xa_erase(&sess->rpc_handle_list, entry->id); free_id: ksmbd_rpc_id_free(entry->id); free_entry: kfree(entry); - up_read(&sess->rpc_lock); return -EINVAL; } @@ -145,7 +148,9 @@ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id) { struct ksmbd_session_rpc *entry; + lockdep_assert_held(&sess->rpc_lock); entry = xa_load(&sess->rpc_handle_list, id); + return entry ? entry->method : 0; } diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h index 995555febe7d1..b8a7317be86b4 100644 --- a/fs/smb/server/server.h +++ b/fs/smb/server/server.h @@ -43,6 +43,7 @@ struct ksmbd_server_config { unsigned int auth_mechs; unsigned int max_connections; unsigned int max_inflight_req; + unsigned int max_ip_connections; char *conf[SERVER_CONF_WORK_GROUP + 1]; struct task_struct *dh_task; diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 6dafc2fbac258..796235cb95677 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -4623,8 +4623,15 @@ static int smb2_get_info_file_pipe(struct ksmbd_session *sess, * pipe without opening it, checking error condition here */ id = req->VolatileFileId; - if (!ksmbd_session_rpc_method(sess, id)) + + lockdep_assert_not_held(&sess->rpc_lock); + + down_read(&sess->rpc_lock); + if (!ksmbd_session_rpc_method(sess, id)) { + up_read(&sess->rpc_lock); return -ENOENT; + } + up_read(&sess->rpc_lock); ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n", req->FileInfoClass, req->VolatileFileId); @@ -5600,7 +5607,8 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work, if (!work->tcon->posix_extensions) { pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n"); - rc = -EOPNOTSUPP; + path_put(&path); + return -EOPNOTSUPP; } else { info = (struct filesystem_posix_info *)(rsp->Buffer); info->OptimalTransferSize = cpu_to_le32(stfs.f_bsize); diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c index 2da2a5f6b983a..ce5c50ac987cf 100644 --- a/fs/smb/server/transport_ipc.c +++ b/fs/smb/server/transport_ipc.c @@ -263,10 +263,16 @@ static void ipc_msg_handle_free(int handle) static int handle_response(int type, void *payload, size_t sz) { - unsigned int handle = *(unsigned int *)payload; + unsigned int handle; struct ipc_msg_table_entry *entry; int ret = 0; + /* Prevent 4-byte read beyond declared payload size */ + if (sz < sizeof(unsigned int)) + return -EINVAL; + + handle = *(unsigned int *)payload; + ipc_update_last_active(); down_read(&ipc_msg_table_lock); hash_for_each_possible(ipc_msg_table, entry, ipc_table_hlist, handle) { @@ -335,6 +341,9 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req) if (req->max_connections) server_conf.max_connections = req->max_connections; + if (req->max_ip_connections) + 
server_conf.max_ip_connections = req->max_ip_connections; + ret = ksmbd_set_netbios_name(req->netbios_name); ret |= ksmbd_set_server_string(req->server_string); ret |= ksmbd_set_work_group(req->work_group); @@ -822,6 +831,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle if (!msg) return NULL; + lockdep_assert_not_held(&sess->rpc_lock); + + down_read(&sess->rpc_lock); msg->type = KSMBD_EVENT_RPC_REQUEST; req = (struct ksmbd_rpc_command *)msg->payload; req->handle = handle; @@ -830,6 +842,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle req->flags |= KSMBD_RPC_WRITE_METHOD; req->payload_sz = payload_sz; memcpy(req->payload, payload, payload_sz); + up_read(&sess->rpc_lock); resp = ipc_msg_send_request(msg, req->handle); ipc_msg_free(msg); @@ -846,6 +859,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle) if (!msg) return NULL; + lockdep_assert_not_held(&sess->rpc_lock); + + down_read(&sess->rpc_lock); msg->type = KSMBD_EVENT_RPC_REQUEST; req = (struct ksmbd_rpc_command *)msg->payload; req->handle = handle; @@ -853,6 +869,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle) req->flags |= rpc_context_flags(sess); req->flags |= KSMBD_RPC_READ_METHOD; req->payload_sz = 0; + up_read(&sess->rpc_lock); resp = ipc_msg_send_request(msg, req->handle); ipc_msg_free(msg); @@ -873,6 +890,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle if (!msg) return NULL; + lockdep_assert_not_held(&sess->rpc_lock); + + down_read(&sess->rpc_lock); msg->type = KSMBD_EVENT_RPC_REQUEST; req = (struct ksmbd_rpc_command *)msg->payload; req->handle = handle; @@ -881,6 +901,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle req->flags |= KSMBD_RPC_IOCTL_METHOD; req->payload_sz = payload_sz; memcpy(req->payload, payload, payload_sz); + up_read(&sess->rpc_lock); resp = ipc_msg_send_request(msg, req->handle); ipc_msg_free(msg); diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c index 2fc689f99997e..bf79c066a982e 100644 --- a/fs/smb/server/transport_rdma.c +++ b/fs/smb/server/transport_rdma.c @@ -147,11 +147,15 @@ struct smb_direct_transport { wait_queue_head_t wait_send_pending; atomic_t send_pending; - struct delayed_work post_recv_credits_work; + struct work_struct post_recv_credits_work; struct work_struct send_immediate_work; struct work_struct disconnect_work; bool negotiation_requested; + + bool legacy_iwarp; + u8 initiator_depth; + u8 responder_resources; }; #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport)) @@ -346,6 +350,9 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id) t->cm_id = cm_id; cm_id->context = t; + t->initiator_depth = SMB_DIRECT_CM_INITIATOR_DEPTH; + t->responder_resources = 1; + t->status = SMB_DIRECT_CS_NEW; init_waitqueue_head(&t->wait_status); @@ -366,8 +373,8 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id) spin_lock_init(&t->lock_new_recv_credits); - INIT_DELAYED_WORK(&t->post_recv_credits_work, - smb_direct_post_recv_credits); + INIT_WORK(&t->post_recv_credits_work, + smb_direct_post_recv_credits); INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work); INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work); @@ -398,9 +405,9 @@ static void free_transport(struct smb_direct_transport *t) wait_event(t->wait_send_pending, atomic_read(&t->send_pending) == 0); - 
cancel_work_sync(&t->disconnect_work); - cancel_delayed_work_sync(&t->post_recv_credits_work); - cancel_work_sync(&t->send_immediate_work); + disable_work_sync(&t->disconnect_work); + disable_work_sync(&t->post_recv_credits_work); + disable_work_sync(&t->send_immediate_work); if (t->qp) { ib_drain_qp(t->qp); @@ -614,8 +621,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc) wake_up_interruptible(&t->wait_send_credits); if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count)) - mod_delayed_work(smb_direct_wq, - &t->post_recv_credits_work, 0); + queue_work(smb_direct_wq, &t->post_recv_credits_work); if (data_length) { enqueue_reassembly(t, recvmsg, (int)data_length); @@ -772,8 +778,7 @@ static int smb_direct_read(struct ksmbd_transport *t, char *buf, st->count_avail_recvmsg += queue_removed; if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) { spin_unlock(&st->receive_credit_lock); - mod_delayed_work(smb_direct_wq, - &st->post_recv_credits_work, 0); + queue_work(smb_direct_wq, &st->post_recv_credits_work); } else { spin_unlock(&st->receive_credit_lock); } @@ -800,7 +805,7 @@ static int smb_direct_read(struct ksmbd_transport *t, char *buf, static void smb_direct_post_recv_credits(struct work_struct *work) { struct smb_direct_transport *t = container_of(work, - struct smb_direct_transport, post_recv_credits_work.work); + struct smb_direct_transport, post_recv_credits_work); struct smb_direct_recvmsg *recvmsg; int receive_credits, credits = 0; int ret; @@ -933,12 +938,15 @@ static int smb_direct_flush_send_list(struct smb_direct_transport *t, struct smb_direct_sendmsg, list); + if (send_ctx->need_invalidate_rkey) { + first->wr.opcode = IB_WR_SEND_WITH_INV; + first->wr.ex.invalidate_rkey = send_ctx->remote_key; + send_ctx->need_invalidate_rkey = false; + send_ctx->remote_key = 0; + } + last->wr.send_flags = IB_SEND_SIGNALED; last->wr.wr_cqe = &last->cqe; - if (is_last && send_ctx->need_invalidate_rkey) { - last->wr.opcode = IB_WR_SEND_WITH_INV; - last->wr.ex.invalidate_rkey = send_ctx->remote_key; - } ret = smb_direct_post_send(t, &first->wr); if (!ret) { @@ -1625,21 +1633,21 @@ static int smb_direct_send_negotiate_response(struct smb_direct_transport *t, static int smb_direct_accept_client(struct smb_direct_transport *t) { struct rdma_conn_param conn_param; - struct ib_port_immutable port_immutable; - u32 ird_ord_hdr[2]; + __be32 ird_ord_hdr[2]; int ret; + /* + * smb_direct_handle_connect_request() + * already negotiated t->initiator_depth + * and t->responder_resources + */ memset(&conn_param, 0, sizeof(conn_param)); - conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom, - SMB_DIRECT_CM_INITIATOR_DEPTH); - conn_param.responder_resources = 0; - - t->cm_id->device->ops.get_port_immutable(t->cm_id->device, - t->cm_id->port_num, - &port_immutable); - if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) { - ird_ord_hdr[0] = conn_param.responder_resources; - ird_ord_hdr[1] = 1; + conn_param.initiator_depth = t->initiator_depth; + conn_param.responder_resources = t->responder_resources; + + if (t->legacy_iwarp) { + ird_ord_hdr[0] = cpu_to_be32(conn_param.responder_resources); + ird_ord_hdr[1] = cpu_to_be32(conn_param.initiator_depth); conn_param.private_data = ird_ord_hdr; conn_param.private_data_len = sizeof(ird_ord_hdr); } else { @@ -1681,7 +1689,7 @@ static int smb_direct_prepare_negotiation(struct smb_direct_transport *t) goto out_err; } - smb_direct_post_recv_credits(&t->post_recv_credits_work.work); + 
smb_direct_post_recv_credits(&t->post_recv_credits_work); return 0; out_err: put_recvmsg(t, recvmsg); @@ -2025,10 +2033,13 @@ static bool rdma_frwr_is_supported(struct ib_device_attr *attrs) return true; } -static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id) +static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id, + struct rdma_cm_event *event) { struct smb_direct_transport *t; struct task_struct *handler; + u8 peer_initiator_depth; + u8 peer_responder_resources; int ret; if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) { @@ -2042,6 +2053,67 @@ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id) if (!t) return -ENOMEM; + peer_initiator_depth = event->param.conn.initiator_depth; + peer_responder_resources = event->param.conn.responder_resources; + if (rdma_protocol_iwarp(new_cm_id->device, new_cm_id->port_num) && + event->param.conn.private_data_len == 8) { + /* + * Legacy clients with only iWarp MPA v1 support + * need a private blob in order to negotiate + * the IRD/ORD values. + */ + const __be32 *ird_ord_hdr = event->param.conn.private_data; + u32 ird32 = be32_to_cpu(ird_ord_hdr[0]); + u32 ord32 = be32_to_cpu(ird_ord_hdr[1]); + + /* + * cifs.ko sends the legacy IRD/ORD negotiation + * even if iWarp MPA v2 was used. + * + * Here we check that the values match and only + * mark the client as legacy if they don't match. + */ + if ((u32)event->param.conn.initiator_depth != ird32 || + (u32)event->param.conn.responder_resources != ord32) { + /* + * There are broken clients (old cifs.ko) + * using little endian and also + * struct rdma_conn_param only uses u8 + * for initiator_depth and responder_resources, + * so we truncate the value to U8_MAX. + * + * smb_direct_accept_client() will then + * do the real negotiation in order to + * select the minimum between client and + * server. + */ + ird32 = min_t(u32, ird32, U8_MAX); + ord32 = min_t(u32, ord32, U8_MAX); + + t->legacy_iwarp = true; + peer_initiator_depth = (u8)ird32; + peer_responder_resources = (u8)ord32; + } + } + + /* + * First set what we as the server are able to support + */ + t->initiator_depth = min_t(u8, t->initiator_depth, + new_cm_id->device->attrs.max_qp_rd_atom); + + /* + * negotiate the value by using the minimum + * between client and server if the client provided + * non-zero values.
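
The negotiation described in the comments above reduces to clamping each side's advertised IRD/ORD value. A minimal standalone sketch of that min-of-peers rule, with hypothetical names and no claim to match the ksmbd implementation:

#include <stdint.h>

/* Clamp a depth value: start from the server default, cap it by the
 * device limit, then take the minimum with the peer's advertised value
 * when that value is non-zero (zero means the peer advertised nothing).
 */
static uint8_t negotiate_depth(uint8_t server_default, uint8_t device_max,
			       uint32_t peer_value)
{
	uint8_t depth = server_default < device_max ? server_default : device_max;

	if (peer_value != 0 && peer_value < depth)
		depth = (uint8_t)peer_value;
	return depth;
}
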
+ */ + if (peer_initiator_depth != 0) + t->initiator_depth = min_t(u8, t->initiator_depth, + peer_initiator_depth); + if (peer_responder_resources != 0) + t->responder_resources = min_t(u8, t->responder_resources, + peer_responder_resources); + ret = smb_direct_connect(t); if (ret) goto out_err; @@ -2066,7 +2138,7 @@ static int smb_direct_listen_handler(struct rdma_cm_id *cm_id, { switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: { - int ret = smb_direct_handle_connect_request(cm_id); + int ret = smb_direct_handle_connect_request(cm_id, event); if (ret) { pr_err("Can't create transport: %d\n", ret); diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c index 756833c91b140..b51ccc16abe11 100644 --- a/fs/smb/server/transport_tcp.c +++ b/fs/smb/server/transport_tcp.c @@ -240,6 +240,7 @@ static int ksmbd_kthread_fn(void *p) struct interface *iface = (struct interface *)p; struct ksmbd_conn *conn; int ret; + unsigned int max_ip_conns; while (!kthread_should_stop()) { mutex_lock(&iface->sock_release_lock); @@ -257,34 +258,38 @@ static int ksmbd_kthread_fn(void *p) continue; } + if (!server_conf.max_ip_connections) + goto skip_max_ip_conns_limit; + /* * Limits repeated connections from clients with the same IP. */ + max_ip_conns = 0; down_read(&conn_list_lock); - list_for_each_entry(conn, &conn_list, conns_list) + list_for_each_entry(conn, &conn_list, conns_list) { #if IS_ENABLED(CONFIG_IPV6) if (client_sk->sk->sk_family == AF_INET6) { if (memcmp(&client_sk->sk->sk_v6_daddr, - &conn->inet6_addr, 16) == 0) { - ret = -EAGAIN; - break; - } + &conn->inet6_addr, 16) == 0) + max_ip_conns++; } else if (inet_sk(client_sk->sk)->inet_daddr == - conn->inet_addr) { - ret = -EAGAIN; - break; - } + conn->inet_addr) + max_ip_conns++; #else if (inet_sk(client_sk->sk)->inet_daddr == - conn->inet_addr) { + conn->inet_addr) + max_ip_conns++; +#endif + if (server_conf.max_ip_connections <= max_ip_conns) { ret = -EAGAIN; break; } -#endif + } up_read(&conn_list_lock); if (ret == -EAGAIN) continue; +skip_max_ip_conns_limit: if (server_conf.max_connections && atomic_inc_return(&active_num_conn) >= server_conf.max_connections) { pr_info_ratelimited("Limit the maximum number of connections(%u)\n", diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c index d5918eba27e37..f5dcb8353f862 100644 --- a/fs/squashfs/inode.c +++ b/fs/squashfs/inode.c @@ -140,8 +140,17 @@ int squashfs_read_inode(struct inode *inode, long long ino) if (err < 0) goto failed_read; + inode->i_size = le32_to_cpu(sqsh_ino->file_size); frag = le32_to_cpu(sqsh_ino->fragment); if (frag != SQUASHFS_INVALID_FRAG) { + /* + * the file cannot have a fragment (tail end) and a + * file size that is a multiple of the block size + */ + if ((inode->i_size & (msblk->block_size - 1)) == 0) { + err = -EINVAL; + goto failed_read; + } frag_offset = le32_to_cpu(sqsh_ino->offset); frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); if (frag_size < 0) { @@ -155,7 +164,6 @@ int squashfs_read_inode(struct inode *inode, long long ino) } set_nlink(inode, 1); - inode->i_size = le32_to_cpu(sqsh_ino->file_size); inode->i_fop = &generic_ro_fops; inode->i_mode |= S_IFREG; inode->i_blocks = ((inode->i_size - 1) >> 9) + 1; @@ -165,6 +173,7 @@ int squashfs_read_inode(struct inode *inode, long long ino) squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->block_list_start = block; squashfs_i(inode)->offset = offset; + squashfs_i(inode)->parent = 0; inode->i_data.a_ops = &squashfs_aops; TRACE("File inode %x:%x, start_block %llx, 
block_list_start " @@ -183,8 +192,21 @@ int squashfs_read_inode(struct inode *inode, long long ino) if (err < 0) goto failed_read; + inode->i_size = le64_to_cpu(sqsh_ino->file_size); + if (inode->i_size < 0) { + err = -EINVAL; + goto failed_read; + } frag = le32_to_cpu(sqsh_ino->fragment); if (frag != SQUASHFS_INVALID_FRAG) { + /* + * the file cannot have a fragment (tailend) and have a + * file size a multiple of the block size + */ + if ((inode->i_size & (msblk->block_size - 1)) == 0) { + err = -EINVAL; + goto failed_read; + } frag_offset = le32_to_cpu(sqsh_ino->offset); frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); if (frag_size < 0) { @@ -199,7 +221,6 @@ int squashfs_read_inode(struct inode *inode, long long ino) xattr_id = le32_to_cpu(sqsh_ino->xattr); set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); - inode->i_size = le64_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_inode_ops; inode->i_fop = &generic_ro_fops; inode->i_mode |= S_IFREG; @@ -212,6 +233,7 @@ int squashfs_read_inode(struct inode *inode, long long ino) squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->block_list_start = block; squashfs_i(inode)->offset = offset; + squashfs_i(inode)->parent = 0; inode->i_data.a_ops = &squashfs_aops; TRACE("File inode %x:%x, start_block %llx, block_list_start " @@ -292,6 +314,7 @@ int squashfs_read_inode(struct inode *inode, long long ino) inode->i_mode |= S_IFLNK; squashfs_i(inode)->start = block; squashfs_i(inode)->offset = offset; + squashfs_i(inode)->parent = 0; if (type == SQUASHFS_LSYMLINK_TYPE) { __le32 xattr; @@ -329,6 +352,7 @@ int squashfs_read_inode(struct inode *inode, long long ino) set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); rdev = le32_to_cpu(sqsh_ino->rdev); init_special_inode(inode, inode->i_mode, new_decode_dev(rdev)); + squashfs_i(inode)->parent = 0; TRACE("Device inode %x:%x, rdev %x\n", SQUASHFS_INODE_BLK(ino), offset, rdev); @@ -353,6 +377,7 @@ int squashfs_read_inode(struct inode *inode, long long ino) set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); rdev = le32_to_cpu(sqsh_ino->rdev); init_special_inode(inode, inode->i_mode, new_decode_dev(rdev)); + squashfs_i(inode)->parent = 0; TRACE("Device inode %x:%x, rdev %x\n", SQUASHFS_INODE_BLK(ino), offset, rdev); @@ -373,6 +398,7 @@ int squashfs_read_inode(struct inode *inode, long long ino) inode->i_mode |= S_IFSOCK; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); init_special_inode(inode, inode->i_mode, 0); + squashfs_i(inode)->parent = 0; break; } case SQUASHFS_LFIFO_TYPE: @@ -392,6 +418,7 @@ int squashfs_read_inode(struct inode *inode, long long ino) inode->i_op = &squashfs_inode_ops; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); init_special_inode(inode, inode->i_mode, 0); + squashfs_i(inode)->parent = 0; break; } default: diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h index 2c82d6f2a4561..8e497ac07b9a8 100644 --- a/fs/squashfs/squashfs_fs_i.h +++ b/fs/squashfs/squashfs_fs_i.h @@ -16,6 +16,7 @@ struct squashfs_inode_info { u64 xattr; unsigned int xattr_size; int xattr_count; + int parent; union { struct { u64 fragment_block; @@ -27,7 +28,6 @@ struct squashfs_inode_info { u64 dir_idx_start; int dir_idx_offset; int dir_idx_cnt; - int parent; }; }; struct inode vfs_inode; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 4386dd845e400..0f97850a165ad 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -2265,6 +2265,9 @@ int udf_current_aext(struct inode *inode, struct extent_position *epos, if (check_add_overflow(sizeof(struct allocExtDesc), 
le32_to_cpu(header->lengthAllocDescs), &alen)) return -1; + + if (alen > epos->bh->b_size) + return -1; } switch (iinfo->i_alloc_type) { diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h index 3e6682ed656b3..a151ea6c0f721 100644 --- a/fs/xfs/libxfs/xfs_log_format.h +++ b/fs/xfs/libxfs/xfs_log_format.h @@ -174,12 +174,40 @@ typedef struct xlog_rec_header { __be32 h_prev_block; /* block number to previous LR : 4 */ __be32 h_num_logops; /* number of log operations in this LR : 4 */ __be32 h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; - /* new fields */ + + /* fields added by the Linux port: */ __be32 h_fmt; /* format of log record : 4 */ uuid_t h_fs_uuid; /* uuid of FS : 16 */ + + /* fields added for log v2: */ __be32 h_size; /* iclog size : 4 */ + + /* + * When h_size was added for log v2 support, it caused the structure to + * have a different size on i386 vs all other architectures because the + * sum of the sizes of the members is not aligned by that of the largest + * __be64-sized member, and i386 has really odd struct alignment rules. + * + * Due to the way the log headers are placed out on-disk that alone is + * not a problem because the xlog_rec_header always sits alone in a + * BBSIZE-sized area, and the rest of that area is padded with zeroes. + * But xlog_cksum used to calculate the checksum based on the structure + * size, and thus gave different checksums for i386 vs the rest. + * We now do two checksum validation passes for both sizes to allow + * moving v5 file systems with unclean logs between i386 and other + * (little-endian) architectures. + */ + __u32 h_pad0; } xlog_rec_header_t; +#ifdef __i386__ +#define XLOG_REC_SIZE offsetofend(struct xlog_rec_header, h_size) +#define XLOG_REC_SIZE_OTHER sizeof(struct xlog_rec_header) +#else +#define XLOG_REC_SIZE sizeof(struct xlog_rec_header) +#define XLOG_REC_SIZE_OTHER offsetofend(struct xlog_rec_header, h_size) +#endif /* __i386__ */ + typedef struct xlog_rec_ext_header { __be32 xh_cycle; /* write cycle of log : 4 */ __be32 xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */ diff --git a/fs/xfs/libxfs/xfs_ondisk.h b/fs/xfs/libxfs/xfs_ondisk.h index 23c133fd36f5b..01b65d8774533 100644 --- a/fs/xfs/libxfs/xfs_ondisk.h +++ b/fs/xfs/libxfs/xfs_ondisk.h @@ -149,6 +149,8 @@ xfs_check_ondisk_structs(void) XFS_CHECK_STRUCT_SIZE(struct xfs_rud_log_format, 16); XFS_CHECK_STRUCT_SIZE(struct xfs_map_extent, 32); XFS_CHECK_STRUCT_SIZE(struct xfs_phys_extent, 16); + XFS_CHECK_STRUCT_SIZE(struct xlog_rec_header, 328); + XFS_CHECK_STRUCT_SIZE(struct xlog_rec_ext_header, 260); XFS_CHECK_OFFSET(struct xfs_bui_log_format, bui_extents, 16); XFS_CHECK_OFFSET(struct xfs_cui_log_format, cui_extents, 16); diff --git a/fs/xfs/scrub/nlinks.c b/fs/xfs/scrub/nlinks.c index 80aee30886c45..02f5522552dbf 100644 --- a/fs/xfs/scrub/nlinks.c +++ b/fs/xfs/scrub/nlinks.c @@ -376,6 +376,36 @@ xchk_nlinks_collect_pptr( return error; } +static uint +xchk_nlinks_ilock_dir( + struct xfs_inode *ip) +{ + uint lock_mode = XFS_ILOCK_SHARED; + + /* + * We're going to scan the directory entries, so we must be ready to + * pull the data fork mappings into memory if they aren't already. + */ + if (xfs_need_iread_extents(&ip->i_df)) + lock_mode = XFS_ILOCK_EXCL; + + /* + * We're going to scan the parent pointers, so we must be ready to + * pull the attr fork mappings into memory if they aren't already.
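
The two XLOG_REC_SIZE definitions above feed a two-pass checksum check during recovery. A self-contained sketch of the acceptance logic, using a placeholder checksum routine rather than the kernel's CRC32c helper:

#include <stddef.h>
#include <stdint.h>

/* Stand-in for the real checksum helper; illustration only, not a CRC. */
static uint32_t cksum_sketch(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum = (sum << 1) ^ *p++;
	return sum;
}

/* Accept a record checksum computed over either header size, so that
 * logs written on i386 validate on other architectures and vice versa.
 */
static int rec_crc_ok(uint32_t expected, const uint8_t *hdr,
		      size_t native_size, size_t other_size)
{
	if (cksum_sketch(hdr, native_size) == expected)
		return 1;
	/* second pass: the layout the other struct size would produce */
	return cksum_sketch(hdr, other_size) == expected;
}
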
+ */ + if (xfs_has_parent(ip->i_mount) && xfs_inode_has_attr_fork(ip) && + xfs_need_iread_extents(&ip->i_af)) + lock_mode = XFS_ILOCK_EXCL; + + /* + * Take the IOLOCK so that other threads cannot start a directory + * update while we're scanning. + */ + lock_mode |= XFS_IOLOCK_SHARED; + xfs_ilock(ip, lock_mode); + return lock_mode; +} + /* Walk a directory to bump the observed link counts of the children. */ STATIC int xchk_nlinks_collect_dir( @@ -394,8 +424,7 @@ xchk_nlinks_collect_dir( return 0; /* Prevent anyone from changing this directory while we walk it. */ - xfs_ilock(dp, XFS_IOLOCK_SHARED); - lock_mode = xfs_ilock_data_map_shared(dp); + lock_mode = xchk_nlinks_ilock_dir(dp); /* * The dotdot entry of an unlinked directory still points to the last @@ -452,7 +481,6 @@ xchk_nlinks_collect_dir( xchk_iscan_abort(&xnc->collect_iscan); out_unlock: xfs_iunlock(dp, lock_mode); - xfs_iunlock(dp, XFS_IOLOCK_SHARED); return error; } diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index 53697f3c5e1b0..8688edec58755 100644 --- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -409,8 +409,6 @@ xreap_agextent_iter( if (crosslinked) { trace_xreap_dispose_unmap_extent(sc->sa.pag, agbno, *aglenp); - rs->force_roll = true; - if (rs->oinfo == &XFS_RMAP_OINFO_COW) { /* * If we're unmapping CoW staging extents, remove the @@ -418,11 +416,14 @@ xreap_agextent_iter( * rmap record as well. */ xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp); + rs->force_roll = true; return 0; } - return xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, - *aglenp, rs->oinfo); + xfs_rmap_free_extent(sc->tp, sc->sa.pag->pag_agno, agbno, + *aglenp, rs->oinfo->oi_owner); + rs->deferred++; + return 0; } trace_xreap_dispose_free_extent(sc->sa.pag, agbno, *aglenp); diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 26b2f5887b881..a4c00decd97ba 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1567,13 +1567,13 @@ xlog_cksum( struct xlog *log, struct xlog_rec_header *rhead, char *dp, - int size) + unsigned int hdrsize, + unsigned int size) { uint32_t crc; /* first generate the crc for the record header ... */ - crc = xfs_start_cksum_update((char *)rhead, - sizeof(struct xlog_rec_header), + crc = xfs_start_cksum_update((char *)rhead, hdrsize, offsetof(struct xlog_rec_header, h_crc)); /* ... then for additional cycle data for v2 logs ... */ @@ -1837,7 +1837,7 @@ xlog_sync( /* calculcate the checksum */ iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, - iclog->ic_datap, size); + iclog->ic_datap, XLOG_REC_SIZE, size); /* * Intentionally corrupt the log record CRC based on the error injection * frequency, if defined. 
This facilitates testing log recovery in the diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index b8778a4fd6b64..89a75b7cbd6a4 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -498,8 +498,8 @@ xlog_recover_finish( extern void xlog_recover_cancel(struct xlog *); -extern __le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead, - char *dp, int size); +__le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead, + char *dp, unsigned int hdrsize, unsigned int size); extern struct kmem_cache *xfs_log_ticket_cache; struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes, diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 704aaadb61cf2..64338ce501c99 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2890,20 +2890,34 @@ xlog_recover_process( int pass, struct list_head *buffer_list) { - __le32 old_crc = rhead->h_crc; - __le32 crc; + __le32 expected_crc = rhead->h_crc, crc, other_crc; - crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len)); + crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE, + be32_to_cpu(rhead->h_len)); + + /* + * Look at the end of the struct xlog_rec_header definition in + * xfs_log_format.h for the gory details. + */ + if (expected_crc && crc != expected_crc) { + other_crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE_OTHER, + be32_to_cpu(rhead->h_len)); + if (other_crc == expected_crc) { + xfs_notice_once(log->l_mp, + "Fixing up incorrect CRC due to padding."); + crc = other_crc; + } + } /* * Nothing else to do if this is a CRC verification pass. Just return * if this a record with a non-zero crc. Unfortunately, mkfs always - * sets old_crc to 0 so we must consider this valid even on v5 supers. - * Otherwise, return EFSBADCRC on failure so the callers up the stack - * know precisely what failed. + * sets expected_crc to 0 so we must consider this valid even on v5 + * supers. Otherwise, return EFSBADCRC on failure so the callers up the + * stack know precisely what failed. */ if (pass == XLOG_RECOVER_CRCPASS) { - if (old_crc && crc != old_crc) + if (expected_crc && crc != expected_crc) return -EFSBADCRC; return 0; } @@ -2914,11 +2928,11 @@ xlog_recover_process( * zero CRC check prevents warnings from being emitted when upgrading * the kernel from one that does not add CRCs by default. */ - if (crc != old_crc) { - if (old_crc || xfs_has_crc(log->l_mp)) { + if (crc != expected_crc) { + if (expected_crc || xfs_has_crc(log->l_mp)) { xfs_alert(log->l_mp, "log record CRC mismatch: found 0x%x, expected 0x%x.", - le32_to_cpu(old_crc), + le32_to_cpu(expected_crc), le32_to_cpu(crc)); xfs_hex_dump(dp, 32); } diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 201a86b3574da..77eaff6e16b15 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1232,16 +1232,25 @@ suffix_kstrtoint( static inline void xfs_fs_warn_deprecated( struct fs_context *fc, - struct fs_parameter *param, - uint64_t flag, - bool value) + struct fs_parameter *param) { - /* Don't print the warning if reconfiguring and current mount point * already had the flag set + /* + * Always warn about someone passing in a deprecated mount option. + * Previously we wouldn't print the warning if we were reconfiguring + * and current mount point already had the flag set, but that was not + * the right thing to do. + * + * Many distributions mount the root filesystem with no options in the + * initramfs and rely on mount -a to remount the root fs with the + * options in fstab. 
However, the old behavior meant that there would + * never be a warning about deprecated mount options for the root fs in + * /etc/fstab. On a single-fs system, that means no warning at all. + * + * Compounding this problem are distribution scripts that copy + * /proc/mounts to fstab, which means that we can't remove mount + * options unless we're 100% sure they have only ever been advertised + * in /proc/mounts in response to explicitly provided mount options. */ - if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && - !!(XFS_M(fc->root->d_sb)->m_features & flag) == value) - return; xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); } @@ -1380,19 +1389,19 @@ xfs_fs_parse_param( #endif /* Following mount options will be removed in September 2025 */ case Opt_ikeep: - xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true); + xfs_fs_warn_deprecated(fc, param); parsing_mp->m_features |= XFS_FEAT_IKEEP; return 0; case Opt_noikeep: - xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false); + xfs_fs_warn_deprecated(fc, param); parsing_mp->m_features &= ~XFS_FEAT_IKEEP; return 0; case Opt_attr2: - xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true); + xfs_fs_warn_deprecated(fc, param); parsing_mp->m_features |= XFS_FEAT_ATTR2; return 0; case Opt_noattr2: - xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true); + xfs_fs_warn_deprecated(fc, param); parsing_mp->m_features |= XFS_FEAT_NOATTR2; return 0; default: diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 78b24b0904888..f35b29f500972 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -213,6 +213,12 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0); */ ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE); +/* + * The ACPI Global Lock is mainly used for systems with SMM, so no-SMM systems + * (such as loong_arch) may not have, and need not use, the Global Lock. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_use_global_lock, TRUE); + /* * Maximum timeout for While() loop iterations before forced method abort.
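
A caller honoring the new acpi_gbl_use_global_lock flag would be expected to short-circuit lock acquisition when the flag is clear. A hedged sketch only, not the ACPICA code path; acpi_ev_acquire_global_lock() is the existing ACPICA entry point, and the timeout value here is illustrative:

static acpi_status acquire_global_lock_if_used(void)
{
	/* No SMM arbitration needed on platforms without a Global Lock. */
	if (!acpi_gbl_use_global_lock)
		return AE_OK;

	return acpi_ev_acquire_global_lock(0xFFFF /* timeout, illustrative */);
}
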
* This mechanism is intended to prevent infinite loops during interpreter diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 80de699bf6af4..6862982c5f1a9 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -75,6 +75,7 @@ #if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__)) #include +#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint) DECLARE_TRACEPOINT(rwmmio_write); DECLARE_TRACEPOINT(rwmmio_post_write); DECLARE_TRACEPOINT(rwmmio_read); @@ -91,6 +92,7 @@ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, #else +#define rwmmio_tracepoint_enabled(tracepoint) false static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, unsigned long caller_addr, unsigned long caller_addr0) {} static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, @@ -189,11 +191,13 @@ static inline u8 readb(const volatile void __iomem *addr) { u8 val; - log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __raw_readb(addr); __io_ar(val); - log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -204,11 +208,13 @@ static inline u16 readw(const volatile void __iomem *addr) { u16 val; - log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); __io_ar(val); - log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -219,11 +225,13 @@ static inline u32 readl(const volatile void __iomem *addr) { u32 val; - log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); __io_ar(val); - log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -235,11 +243,13 @@ static inline u64 readq(const volatile void __iomem *addr) { u64 val; - log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); __io_ar(val); - log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -249,11 +259,13 @@ static inline u64 readq(const volatile void __iomem *addr) #define writeb writeb static inline void writeb(u8 value, volatile void __iomem *addr) { - log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writeb(value, addr); __io_aw(); - log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); } #endif @@ -261,11 +273,13 @@ static inline void writeb(u8 value, volatile void __iomem *addr) #define writew 
writew static inline void writew(u16 value, volatile void __iomem *addr) { - log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writew((u16 __force)cpu_to_le16(value), addr); __io_aw(); - log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); } #endif @@ -273,11 +287,13 @@ static inline void writew(u16 value, volatile void __iomem *addr) #define writel writel static inline void writel(u32 value, volatile void __iomem *addr) { - log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writel((u32 __force)__cpu_to_le32(value), addr); __io_aw(); - log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); } #endif @@ -286,11 +302,13 @@ static inline void writel(u32 value, volatile void __iomem *addr) #define writeq writeq static inline void writeq(u64 value, volatile void __iomem *addr) { - log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writeq((u64 __force)__cpu_to_le64(value), addr); __io_aw(); - log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); } #endif #endif /* CONFIG_64BIT */ @@ -306,9 +324,11 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr) { u8 val; - log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); val = __raw_readb(addr); - log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -319,9 +339,11 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr) { u16 val; - log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); - log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -332,9 +354,11 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr) { u32 val; - log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); - log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -345,9 +369,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr) { u64 val; - log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); - log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + 
log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -356,9 +382,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr) #define writeb_relaxed writeb_relaxed static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) { - log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); __raw_writeb(value, addr); - log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); } #endif @@ -366,9 +394,11 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) #define writew_relaxed writew_relaxed static inline void writew_relaxed(u16 value, volatile void __iomem *addr) { - log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); __raw_writew((u16 __force)cpu_to_le16(value), addr); - log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); } #endif @@ -376,9 +406,11 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr) #define writel_relaxed writel_relaxed static inline void writel_relaxed(u32 value, volatile void __iomem *addr) { - log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); __raw_writel((u32 __force)__cpu_to_le32(value), addr); - log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); } #endif @@ -386,9 +418,11 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr) #define writeq_relaxed writeq_relaxed static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) { - log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); __raw_writeq((u64 __force)__cpu_to_le64(value), addr); - log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); } #endif diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 23b358a1271cd..3c75199947f09 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -354,6 +354,7 @@ __start_once = .; \ *(.data..once) \ __end_once = .; \ + *(.data..do_once) \ STRUCT_ALIGN(); \ *(__tracepoints) \ /* implement dynamic printk debug */ \ diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 0c70f3a555750..107b797c33ecf 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -152,7 +152,7 @@ struct af_alg_ctx { size_t used; atomic_t rcvused; - u32 more:1, + bool more:1, merge:1, enc:1, write:1, diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h index e0418818d63c8..e3e610cfe8d30 100644 --- a/include/crypto/sha256_base.h +++ b/include/crypto/sha256_base.h @@ -44,7 +44,7 @@ static inline int lib_sha256_base_do_update(struct sha256_state *sctx, sctx->count += len; if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) { - int blocks; + unsigned int blocks; if (partial) { int p = SHA256_BLOCK_SIZE - 
partial; diff --git a/include/linux/audit.h b/include/linux/audit.h index a394614ccd0b8..e3f06eba9c6e6 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -527,7 +527,7 @@ static inline void audit_log_kern_module(const char *name) static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar) { - if (!audit_dummy_context()) + if (audit_enabled) __audit_fanotify(response, friar); } diff --git a/include/linux/bitops.h b/include/linux/bitops.h index ba35bbf07798b..65d084abbc2c8 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -8,7 +8,6 @@ #include -#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) #define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) #define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64)) #define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) diff --git a/include/linux/bits.h b/include/linux/bits.h index 60044b6088172..b48aa3209c2e2 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -12,6 +12,7 @@ #define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG)) #define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) #define BITS_PER_BYTE 8 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) /* * Create a contiguous bitmask starting at bit position @l and ending at * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. */ #if !defined(__ASSEMBLY__) + +/* + * Missing asm support + * + * GENMASK_U*() depend on BITS_PER_TYPE() which relies on sizeof(), + * something not available in asm. Nevertheless, fixed-width integers are a C + * concept. Assembly code can rely on the long and long long versions instead. + */ + #include +#include #define GENMASK_INPUT_CHECK(h, l) \ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ __is_constexpr((l) > (h)), (l) > (h), 0))) + +/* + * Generate a mask for the specified type @t. Additional checks are made to + * guarantee the value returned fits in that type, relying on the + * -Wshift-count-overflow compiler check to detect incompatible arguments. + * For example, all these create build errors or warnings: + * + * - GENMASK(15, 20): wrong argument order + * - GENMASK(72, 15): doesn't fit unsigned long + * - GENMASK_U32(33, 15): doesn't fit in a u32 + */ +#define GENMASK_TYPE(t, h, l) \ + ((t)(GENMASK_INPUT_CHECK(h, l) + \ + (type_max(t) << (l) & \ + type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h))))) + +#define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l) +#define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l) +#define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l) +#define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l) + +#else /* defined(__ASSEMBLY__) */ + /* * BUILD_BUG_ON_ZERO is not available in h files included from asm files, * disable the input check if that is the case.
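
The typed-GENMASK arithmetic above can be reproduced outside the kernel to watch the shifts at work. This standalone sketch uses <stdint.h> types instead of u8/u16/u32/u64 and omits the input check:

#include <stdint.h>
#include <stdio.h>

#define TYPE_MAX_(t)       ((t)~(t)0)
#define BITS_PER_TYPE_(t)  (sizeof(t) * 8)
#define GENMASK_T_(t, h, l) \
	((t)((TYPE_MAX_(t) << (l)) & \
	     (TYPE_MAX_(t) >> (BITS_PER_TYPE_(t) - 1 - (h)))))

int main(void)
{
	/* bits 19..12 of a 32-bit word: prints 0x000ff000 */
	printf("0x%08x\n", (unsigned int)GENMASK_T_(uint32_t, 19, 12));
	return 0;
}

As in the kernel version, asking for a bit beyond the type's width makes the right-shift count negative or oversized, which trips the compiler's shift-count diagnostics at build time.
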
*/ #define GENMASK_INPUT_CHECK(h, l) 0 -#endif + +#endif /* !defined(__ASSEMBLY__) */ #define GENMASK(h, l) \ (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l)) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6db72c66de91d..e8d9803cc6756 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -281,6 +281,7 @@ struct bpf_map_owner { bool xdp_has_frags; u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE]; const struct btf_type *attach_func_proto; + enum bpf_attach_type expected_attach_type; }; struct bpf_map { diff --git a/include/linux/btf.h b/include/linux/btf.h index d99178ce01d21..e473bbfe41286 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -82,7 +82,7 @@ * as to avoid issues such as the compiler inlining or eliding either a static * kfunc, or a global kfunc in an LTO build. */ -#define __bpf_kfunc __used __retain noinline +#define __bpf_kfunc __used __retain __noclone noinline #define __bpf_kfunc_start_defs() \ __diag_push(); \ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index a604c54ae44da..794e38320f568 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -32,6 +32,9 @@ */ #define CPUFREQ_ETERNAL (-1) + +#define CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS NSEC_PER_MSEC + #define CPUFREQ_NAME_LEN 16 /* Print length for names. Extra 1 space for accommodating '\n' in prints */ #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) diff --git a/include/linux/device.h b/include/linux/device.h index 39120b172992e..1f6130e13620d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -873,6 +873,9 @@ static inline bool device_pm_not_required(struct device *dev) static inline void device_set_pm_not_required(struct device *dev) { dev->power.no_pm = true; +#ifdef CONFIG_PM + dev->power.no_callbacks = true; +#endif } static inline void dev_pm_syscore_device(struct device *dev, bool val) diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h index 9b85a3f028d1b..61f7a02b05009 100644 --- a/include/linux/firmware/imx/sm.h +++ b/include/linux/firmware/imx/sm.h @@ -17,7 +17,19 @@ #define SCMI_IMX_CTRL_SAI4_MCLK 4 /* WAKE SAI4 MCLK */ #define SCMI_IMX_CTRL_SAI5_MCLK 5 /* WAKE SAI5 MCLK */ +#if IS_ENABLED(CONFIG_IMX_SCMI_MISC_DRV) int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val); int scmi_imx_misc_ctrl_set(u32 id, u32 val); +#else +static inline int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val) +{ + return -EOPNOTSUPP; +} + +static inline int scmi_imx_misc_ctrl_set(u32 id, u32 val) +{ + return -EOPNOTSUPP; +} +#endif #endif diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h index a9f7b7faf57b0..cf55202aaec26 100644 --- a/include/linux/gpio/regmap.h +++ b/include/linux/gpio/regmap.h @@ -37,9 +37,18 @@ struct regmap; * offset to a register/bitmask pair. If not * given the default gpio_regmap_simple_xlate() * is used. + * @fixed_direction_output: + * (Optional) Bitmap representing the fixed direction of + * the GPIO lines. Useful when there are GPIO lines with a + * fixed direction mixed together in the same register. * @drvdata: (Optional) Pointer to driver specific data which is * not used by gpio-remap but is provided "as is" to the * driver callback(s). + * @regmap_irq_chip: (Optional) Pointer to a regmap_irq_chip structure. If + * set, a regmap-irq device will be created and the IRQ + * domain will be set accordingly. + * @regmap_irq_line: (Optional) The IRQ the device uses to signal interrupts. + * @regmap_irq_flags: (Optional) The IRQF_ flags to use for the interrupt.
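
To use the new fields, a driver hands gpio-regmap a regmap_irq_chip and the interrupt line. Everything below (names, register layout, flags) is hypothetical and only sketches the shape of such a configuration when CONFIG_REGMAP_IRQ is enabled:

static const struct regmap_irq my_gpio_irqs[] = {
	REGMAP_IRQ_REG(0, 0, BIT(0)),
	REGMAP_IRQ_REG(1, 0, BIT(1)),
};

static struct regmap_irq_chip my_gpio_irq_chip = {
	.name		= "my-gpio",
	.status_base	= 0x40,		/* made-up register offsets */
	.mask_base	= 0x44,
	.ack_base	= 0x48,
	.num_regs	= 1,
	.irqs		= my_gpio_irqs,
	.num_irqs	= ARRAY_SIZE(my_gpio_irqs),
};

static int my_probe(struct platform_device *pdev)
{
	struct gpio_regmap_config config = { /* base fields omitted */ };

	config.regmap_irq_chip  = &my_gpio_irq_chip;
	config.regmap_irq_line  = platform_get_irq(pdev, 0);
	config.regmap_irq_flags = IRQF_ONESHOT;
	return PTR_ERR_OR_ZERO(gpio_regmap_register(&config));
}
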
* * The ->reg_mask_xlate translates a given base address and GPIO offset to * register and mask pair. The base address is one of the given register @@ -77,6 +86,13 @@ struct gpio_regmap_config { int reg_stride; int ngpio_per_reg; struct irq_domain *irq_domain; + unsigned long *fixed_direction_output; + +#ifdef CONFIG_REGMAP_IRQ + struct regmap_irq_chip *regmap_irq_chip; + int regmap_irq_line; + unsigned long regmap_irq_flags; +#endif int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base, unsigned int offset, unsigned int *reg, diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h index de45cf2ee1e4f..ce2086f97e3fc 100644 --- a/include/linux/iio/frequency/adf4350.h +++ b/include/linux/iio/frequency/adf4350.h @@ -51,7 +51,7 @@ /* REG3 Bit Definitions */ #define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3) -#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16) +#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 15) #define ADF4350_REG3_12BIT_CSR_EN (1 << 18) #define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21) #define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22) diff --git a/include/linux/ksm.h b/include/linux/ksm.h index ec9c05044d4fe..af303641819a3 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -57,8 +57,14 @@ static inline long mm_ksm_zero_pages(struct mm_struct *mm) static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) { /* Adding mm to ksm is best effort on fork. */ - if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) + if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) { + long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages); + + mm->ksm_merging_pages = 0; + mm->ksm_rmap_items = 0; + atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages); __ksm_enter(mm); + } } static inline int ksm_execve(struct mm_struct *mm) diff --git a/include/linux/mm.h b/include/linux/mm.h index 41f5c88bdf3bc..13b4bd7355c14 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -320,7 +320,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ #define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */ #define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */ -#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ +#define VM_MERGEABLE BIT(31) /* KSM may merge identical pages */ #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS #define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */ @@ -4318,14 +4318,13 @@ static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) * since this value becomes part of PP_SIGNATURE; meaning we can just use the * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is - * 0, we make sure that we leave the two topmost bits empty, as that guarantees - * we won't mistake a valid kernel pointer for a value we set, regardless of the - * VMSPLIT setting. + * 0, we use the lowest bit of PAGE_OFFSET as the boundary if that value is + * known at compile-time. * - * Altogether, this means that the number of bits available is constrained by - * the size of an unsigned long (at the upper end, subtracting two bits per the - * above), and the definition of PP_SIGNATURE (with or without - * POISON_POINTER_DELTA). 
+ * If the value of PAGE_OFFSET is not known at compile time, or if it is too + * small to leave at least 8 bits available above PP_SIGNATURE, we define the + * number of bits to be 0, which turns off the DMA index tracking altogether + * (see page_pool_register_dma_index()). */ #define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA)) #if POISON_POINTER_DELTA > 0 @@ -4334,8 +4333,13 @@ static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) */ #define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT) #else -/* Always leave out the topmost two; see above. */ -#define PP_DMA_INDEX_BITS MIN(32, BITS_PER_LONG - PP_DMA_INDEX_SHIFT - 2) +/* Use the lowest bit of PAGE_OFFSET if there's at least 8 bits available; see above */ +#define PP_DMA_INDEX_MIN_OFFSET (1 << (PP_DMA_INDEX_SHIFT + 8)) +#define PP_DMA_INDEX_BITS ((__builtin_constant_p(PAGE_OFFSET) && \ + PAGE_OFFSET >= PP_DMA_INDEX_MIN_OFFSET && \ + !(PAGE_OFFSET & (PP_DMA_INDEX_MIN_OFFSET - 1))) ? \ + MIN(32, __ffs(PAGE_OFFSET) - PP_DMA_INDEX_SHIFT) : 0) + #endif #define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \ diff --git a/include/linux/once.h b/include/linux/once.h index 30346fcdc7995..449a0e34ad5ad 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -46,7 +46,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key, #define DO_ONCE(func, ...) \ ({ \ bool ___ret = false; \ - static bool __section(".data..once") ___done = false; \ + static bool __section(".data..do_once") ___done = false; \ static DEFINE_STATIC_KEY_TRUE(___once_key); \ if (static_branch_unlikely(&___once_key)) { \ unsigned long ___flags; \ @@ -64,7 +64,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key, #define DO_ONCE_SLEEPABLE(func, ...) \ ({ \ bool ___ret = false; \ - static bool __section(".data..once") ___done = false; \ + static bool __section(".data..do_once") ___done = false; \ static DEFINE_STATIC_KEY_TRUE(___once_key); \ if (static_branch_unlikely(&___once_key)) { \ ___ret = __do_once_sleepable_start(&___done); \ diff --git a/include/linux/pci.h b/include/linux/pci.h index 6b3fef24d60e7..452a3dca28eaa 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1066,6 +1066,20 @@ struct pci_driver { .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 +/** + * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form + * @vend: the vendor name + * @dev: the 16 bit PCI Device ID + * @subvend: the 16 bit PCI Subvendor ID + * @subdev: the 16 bit PCI Subdevice ID + * + * Generate the pci_device_id struct layout for the specific PCI + * device/subdevice. Private data may follow the output. 
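
A typical use of this macro mirrors how PCI_VDEVICE() entries are written, with driver data following the macro output. The device and subsystem IDs below are made up, as is the board_info structure:

static const struct my_board { int flags; } board_info = { 0 };	/* hypothetical */

static const struct pci_device_id my_pci_ids[] = {
	{ PCI_VDEVICE_SUB(REDHAT_QUMRANET, 0x1234, 0x1af4, 0x0042),
	  (kernel_ulong_t)&board_info },
	{ }
};
MODULE_DEVICE_TABLE(pci, my_pci_ids);
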
+ */ +#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \ .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ .subvendor = (subvend), .subdevice = (subdev), 0, 0 + /** * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form * @vend: the vendor name (without PCI_VENDOR_ID_ prefix) diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index d0b29cd1fd204..142ab39a3b386 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -94,7 +94,9 @@ extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); extern void pm_runtime_release_supplier(struct device_link *link); +int devm_pm_runtime_set_active_enabled(struct device *dev); extern int devm_pm_runtime_enable(struct device *dev); +int devm_pm_runtime_get_noresume(struct device *dev); /** * pm_suspend_ignore_children - Set runtime PM behavior regarding children. @@ -278,7 +280,9 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {} static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} +static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; } static inline int devm_pm_runtime_enable(struct device *dev) { return 0; } +static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; } static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} static inline void pm_runtime_get_noresume(struct device *dev) {} diff --git a/include/linux/rseq.h b/include/linux/rseq.h index bc8af3eb55987..1fbeb61babeb8 100644 --- a/include/linux/rseq.h +++ b/include/linux/rseq.h @@ -7,6 +7,12 @@ #include #include +#ifdef CONFIG_MEMBARRIER +# define RSEQ_EVENT_GUARD irq +#else +# define RSEQ_EVENT_GUARD preempt +#endif + /* * Map the event mask on the user-space ABI enum rseq_cs_flags * for direct mask checks. @@ -41,9 +47,8 @@ static inline void rseq_handle_notify_resume(struct ksignal *ksig, static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { - preempt_disable(); - __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); - preempt_enable(); + scoped_guard(RSEQ_EVENT_GUARD) + __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); rseq_handle_notify_resume(ksig, regs); } diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index e68fecf6eab5b..617ebfff2f304 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -81,7 +81,7 @@ struct svc_serv { unsigned int sv_xdrsize; /* XDR buffer size */ struct list_head sv_permsocks; /* all permanent sockets */ struct list_head sv_tempsocks; /* all temporary sockets */ - int sv_tmpcnt; /* count of temporary sockets */ + int sv_tmpcnt; /* count of temporary sockets that are not yet "valid" */ struct timer_list sv_temptimer; /* timer for aging temporary sockets */ char * sv_name; /* service name */ diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 0981e35a9feda..72d1a08f48282 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -99,8 +99,27 @@ enum { XPT_HANDSHAKE, /* xprt requests a handshake */ XPT_TLS_SESSION, /* transport-layer security established */ XPT_PEER_AUTH, /* peer has been authenticated */ + XPT_PEER_VALID, /* peer has presented a filehandle that + * it has access to. It is NOT counted + * in ->sv_tmpcnt. 
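
The accounting behind XPT_PEER_VALID can be read as: only connections that have never proven themselves valid count against the temporary-socket limit, so a flood of idle connections can be shed without touching known-good clients. A consumer-side sketch under that assumption (not the sunrpc code):

static bool too_many_unproven(struct svc_serv *serv, int limit)
{
	bool over;

	spin_lock(&serv->sv_lock);
	over = serv->sv_tmpcnt > limit;	/* "valid" peers already subtracted */
	spin_unlock(&serv->sv_lock);
	return over;
}
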
+ */ + XPT_RPCB_UNREG, /* transport that needs unregistering + * with rpcbind (TCP, UDP) on destroy + */ }; +static inline void svc_xprt_set_valid(struct svc_xprt *xpt) +{ + if (test_bit(XPT_TEMP, &xpt->xpt_flags) && + !test_and_set_bit(XPT_PEER_VALID, &xpt->xpt_flags)) { + struct svc_serv *serv = xpt->xpt_server; + + spin_lock(&serv->sv_lock); + serv->sv_tmpcnt -= 1; + spin_unlock(&serv->sv_lock); + } +} + static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) { spin_lock(&xpt->xpt_lock); diff --git a/include/linux/swap.h b/include/linux/swap.h index f3e0ac20c2e8c..63f85b3fee238 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -382,6 +382,16 @@ void folio_add_lru_vma(struct folio *, struct vm_area_struct *); void mark_page_accessed(struct page *); void folio_mark_accessed(struct folio *); +static inline bool folio_may_be_lru_cached(struct folio *folio) +{ + /* + * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting. + * Holding small numbers of low-order mTHP folios in per-CPU LRU cache + * will be sensible, but nobody has implemented and tested that yet. + */ + return !folio_test_large(folio); +} + extern atomic_t lru_disable_count; static inline bool lru_cache_disabled(void) diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index df33333650a0d..da4309f3cea3f 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -15,6 +15,7 @@ #ifndef __LINUX_USB_GADGET_H #define __LINUX_USB_GADGET_H +#include #include #include #include @@ -32,6 +33,7 @@ struct usb_ep; /** * struct usb_request - describes one i/o request + * @ep: The associated endpoint set by usb_ep_alloc_request(). * @buf: Buffer used for data. Always provide this; some controllers * only use PIO, or don't use DMA for some endpoints. * @dma: DMA address corresponding to 'buf'. If you don't set this @@ -98,6 +100,7 @@ struct usb_ep; */ struct usb_request { + struct usb_ep *ep; void *buf; unsigned length; dma_addr_t dma; @@ -291,6 +294,28 @@ static inline void usb_ep_fifo_flush(struct usb_ep *ep) /*-------------------------------------------------------------------------*/ +/** + * free_usb_request - frees a usb_request object and its buffer + * @req: the request being freed + * + * This helper function frees both the request's buffer and the request object + * itself by calling usb_ep_free_request(). Its signature is designed to be used + * with DEFINE_FREE() to enable automatic, scope-based cleanup for usb_request + * pointers. + */ +static inline void free_usb_request(struct usb_request *req) +{ + if (!req) + return; + + kfree(req->buf); + usb_ep_free_request(req->ep, req); +} + +DEFINE_FREE(free_usb_request, struct usb_request *, free_usb_request(_T)) + +/*-------------------------------------------------------------------------*/ + struct usb_dcd_config_params { __u8 bU1devExitLat; /* U1 Device exit Latency */ #define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */ diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 43343f1586d13..d0746e2c869ad 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -1952,19 +1952,23 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers; * * Note: only legacy non-MC drivers may need this macro. */ -#define v4l2_subdev_call_state_try(sd, o, f, args...) 
\ - ({ \ - int __result; \ - static struct lock_class_key __key; \ - const char *name = KBUILD_BASENAME \ - ":" __stringify(__LINE__) ":state->lock"; \ - struct v4l2_subdev_state *state = \ - __v4l2_subdev_state_alloc(sd, name, &__key); \ - v4l2_subdev_lock_state(state); \ - __result = v4l2_subdev_call(sd, o, f, state, ##args); \ - v4l2_subdev_unlock_state(state); \ - __v4l2_subdev_state_free(state); \ - __result; \ +#define v4l2_subdev_call_state_try(sd, o, f, args...) \ + ({ \ + int __result; \ + static struct lock_class_key __key; \ + const char *name = KBUILD_BASENAME \ + ":" __stringify(__LINE__) ":state->lock"; \ + struct v4l2_subdev_state *state = \ + __v4l2_subdev_state_alloc(sd, name, &__key); \ + if (IS_ERR(state)) { \ + __result = PTR_ERR(state); \ + } else { \ + v4l2_subdev_lock_state(state); \ + __result = v4l2_subdev_call(sd, o, f, state, ##args); \ + v4l2_subdev_unlock_state(state); \ + __v4l2_subdev_state_free(state); \ + } \ + __result; \ }) /** diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index df4af45f8603c..69a1d8b12beff 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1203,6 +1203,27 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, return NULL; } +static inline struct hci_conn *hci_conn_hash_lookup_role(struct hci_dev *hdev, + __u8 type, __u8 role, + bdaddr_t *ba) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == type && c->role == role && !bacmp(&c->dst, ba)) { + rcu_read_unlock(); + return c; + } + } + + rcu_read_unlock(); + + return NULL; +} + static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev, bdaddr_t *ba, __u8 ba_type) diff --git a/include/net/bonding.h b/include/net/bonding.h index 8bb5f016969f1..95f67b308c19a 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -695,6 +695,7 @@ void bond_debug_register(struct bonding *bond); void bond_debug_unregister(struct bonding *bond); void bond_debug_reregister(struct bonding *bond); const char *bond_mode_name(int mode); +bool bond_xdp_check(struct bonding *bond, int mode); void bond_setup(struct net_device *bond_dev); unsigned int bond_get_num_tx_queues(void); int bond_netlink_init(void); diff --git a/include/net/dst.h b/include/net/dst.h index e18826cd05595..e5c9ea1883838 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -561,6 +561,38 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu) dst->ops->update_pmtu(dst, NULL, skb, mtu, false); } +static inline struct net_device *dst_dev(const struct dst_entry *dst) +{ + return READ_ONCE(dst->dev); +} + +static inline struct net_device *dst_dev_rcu(const struct dst_entry *dst) +{ + /* In the future, use rcu_dereference(dst->dev) */ + WARN_ON_ONCE(!rcu_read_lock_held()); + return READ_ONCE(dst->dev); +} + +static inline struct net_device *skb_dst_dev(const struct sk_buff *skb) +{ + return dst_dev(skb_dst(skb)); +} + +static inline struct net_device *skb_dst_dev_rcu(const struct sk_buff *skb) +{ + return dst_dev_rcu(skb_dst(skb)); +} + +static inline struct net *skb_dst_dev_net(const struct sk_buff *skb) +{ + return dev_net(skb_dst_dev(skb)); +} + +static inline struct net *skb_dst_dev_net_rcu(const struct sk_buff *skb) +{ + return dev_net_rcu(skb_dst_dev(skb)); +} + struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie); void dst_blackhole_update_pmtu(struct dst_entry *dst, 
struct sock *sk, struct sk_buff *skb, u32 mtu, bool confirm_neigh); diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 74dd90ff5f129..c32878c69179d 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -150,7 +150,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, int iif, int sdif, bool *refcounted) { - struct net *net = dev_net(skb_dst(skb)->dev); + struct net *net = dev_net_rcu(skb_dst(skb)->dev); const struct ipv6hdr *ip6h = ipv6_hdr(skb); struct sock *sk; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 4bd93571e6c1b..bcc138ff087bd 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -116,7 +116,8 @@ struct inet_connection_sock { #define ATO_BITS 8 __u32 ato:ATO_BITS, /* Predicted tick of soft clock */ lrcv_flowlabel:20, /* last received ipv6 flowlabel */ - unused:4; + dst_quick_ack:1, /* cache dst RTAX_QUICKACK */ + unused:3; unsigned long timeout; /* Currently scheduled timeout */ __u32 lrcvtime; /* timestamp of last received data packet */ __u16 last_seg_size; /* Size of last incoming segment */ diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 5eea47f135a42..3c4118c63cfe1 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -492,7 +492,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, const int sdif, bool *refcounted) { - struct net *net = dev_net(skb_dst(skb)->dev); + struct net *net = skb_dst_dev_net_rcu(skb); const struct iphdr *iph = ip_hdr(skb); struct sock *sk; diff --git a/include/net/ip.h b/include/net/ip.h index bd201278c55a5..5f0f1215d2f92 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -475,7 +475,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, rcu_read_lock(); - net = dev_net_rcu(dst->dev); + net = dev_net_rcu(dst_dev(dst)); if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) || ip_mtu_locked(dst) || !forwarding) { @@ -489,7 +489,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, if (mtu) goto out; - mtu = READ_ONCE(dst->dev->mtu); + mtu = READ_ONCE(dst_dev(dst)->mtu); if (unlikely(ip_mtu_locked(dst))) { if (rt->rt_uses_gateway && mtu > 576) @@ -509,16 +509,17 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, static inline unsigned int ip_skb_dst_mtu(struct sock *sk, const struct sk_buff *skb) { + const struct dst_entry *dst = skb_dst(skb); unsigned int mtu; if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) { bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; - return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); + return ip_dst_mtu_maybe_forward(dst, forwarding); } - mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); - return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu); + mtu = min(READ_ONCE(dst_dev(dst)->mtu), IP_MAX_MTU); + return mtu - lwtunnel_headroom(dst->lwtstate, mtu); } struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len, diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index ae83a969ae64b..01fcf952b05de 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -605,6 +605,21 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, int headroom, bool reply); +static inline void ip_tunnel_adj_headroom(struct net_device 
*dev, + unsigned int headroom) +{ + /* we must cap headroom to some upperlimit, else pskb_expand_head + * will overflow header offsets in skb_headers_offset_update(). + */ + const unsigned int max_allowed = 512; + + if (headroom > max_allowed) + headroom = max_allowed; + + if (headroom > READ_ONCE(dev->needed_headroom)) + WRITE_ONCE(dev->needed_headroom, headroom); +} + int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); static inline int iptunnel_pull_offloads(struct sk_buff *skb) diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index d7b7b6cd4aa10..8a75c73fc5558 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -114,7 +114,6 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct netlink_ext_ack *extack); void qdisc_put_rtab(struct qdisc_rate_table *tab); void qdisc_put_stab(struct qdisc_size_table *tab); -void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc); bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq, spinlock_t *root_lock, bool validate); @@ -290,4 +289,28 @@ static inline bool tc_qdisc_stats_dump(struct Qdisc *sch, return true; } +static inline void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc) +{ + if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { + pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", + txt, qdisc->ops->id, qdisc->handle >> 16); + qdisc->flags |= TCQ_F_WARN_NONWC; + } +} + +static inline unsigned int qdisc_peek_len(struct Qdisc *sch) +{ + struct sk_buff *skb; + unsigned int len; + + skb = sch->ops->peek(sch); + if (unlikely(skb == NULL)) { + qdisc_warn_nonwc("qdisc_peek_len", sch); + return 0; + } + len = qdisc_pkt_len(skb); + + return len; +} + #endif diff --git a/include/net/route.h b/include/net/route.h index 8a11d19f897bb..232b7bf55ba22 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -369,7 +369,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst) const struct net *net; rcu_read_lock(); - net = dev_net_rcu(dst->dev); + net = dev_net_rcu(dst_dev(dst)); hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl); rcu_read_unlock(); } diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h index 63b55ccc4f00c..e5187144c91b7 100644 --- a/include/trace/events/dma.h +++ b/include/trace/events/dma.h @@ -136,6 +136,7 @@ DECLARE_EVENT_CLASS(dma_alloc_class, __entry->dma_addr = dma_addr; __entry->size = size; __entry->flags = flags; + __entry->dir = dir; __entry->attrs = attrs; ), diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h index b8d1e00a7982c..2dfeb158e848a 100644 --- a/include/trace/events/filelock.h +++ b/include/trace/events/filelock.h @@ -27,7 +27,8 @@ { FL_SLEEP, "FL_SLEEP" }, \ { FL_DOWNGRADE_PENDING, "FL_DOWNGRADE_PENDING" }, \ { FL_UNLOCK_PENDING, "FL_UNLOCK_PENDING" }, \ - { FL_OFDLCK, "FL_OFDLCK" }) + { FL_OFDLCK, "FL_OFDLCK" }, \ + { FL_RECLAIM, "FL_RECLAIM"}) #define show_fl_type(val) \ __print_symbolic(val, \ diff --git a/include/uapi/linux/hidraw.h b/include/uapi/linux/hidraw.h index d5ee269864e07..ebd701b3c18d9 100644 --- a/include/uapi/linux/hidraw.h +++ b/include/uapi/linux/hidraw.h @@ -48,6 +48,8 @@ struct hidraw_devinfo { #define HIDIOCGOUTPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0C, len) #define HIDIOCREVOKE _IOW('H', 0x0D, int) /* Revoke device access */ +#define HIDIOCTL_LAST _IOC_NR(HIDIOCREVOKE) + #define HIDRAW_FIRST_MINOR 0 #define HIDRAW_MAX_DEVICES 64 /* number of reports to buffer */ diff --git a/include/vdso/gettime.h 
b/include/vdso/gettime.h index c50d152e7b3e0..9ac161866653a 100644 --- a/include/vdso/gettime.h +++ b/include/vdso/gettime.h @@ -5,6 +5,7 @@ #include struct __kernel_timespec; +struct __kernel_old_timeval; struct timezone; #if !defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) diff --git a/init/Kconfig b/init/Kconfig index 45990792cb4a6..219ccdb0af732 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1440,6 +1440,7 @@ config BOOT_CONFIG_EMBED_FILE config INITRAMFS_PRESERVE_MTIME bool "Preserve cpio archive mtimes in initramfs" + depends on BLK_DEV_INITRD default y help Each entry in an initramfs cpio archive carries an mtime value. When diff --git a/init/main.c b/init/main.c index c4778edae7972..821df1f05e9c0 100644 --- a/init/main.c +++ b/init/main.c @@ -543,6 +543,12 @@ static int __init unknown_bootoption(char *param, char *val, const char *unused, void *arg) { size_t len = strlen(param); + /* + * Well-known bootloader identifiers: + * 1. LILO/Grub pass "BOOT_IMAGE=..."; + * 2. kexec/kdump (kexec-tools) pass "kexec". + */ + const char *bootloader[] = { "BOOT_IMAGE=", "kexec", NULL }; /* Handle params aliased to sysctls */ if (sysctl_is_alias(param)) @@ -550,6 +556,12 @@ static int __init unknown_bootoption(char *param, char *val, repair_env_string(param, val); + /* Handle bootloader identifier */ + for (int i = 0; bootloader[i]; i++) { + if (strstarts(param, bootloader[i])) + return 0; + } + /* Handle obsolete-style parameters */ if (obsolete_checksetup(param)) return 0; diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index c6c624eb9866d..5c0a02bfeb555 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -55,7 +55,6 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) struct io_ring_ctx *ctx = file->private_data; struct io_overflow_cqe *ocqe; struct io_rings *r = ctx->rings; - struct rusage sq_usage; unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1; unsigned int sq_head = READ_ONCE(r->sq.head); unsigned int sq_tail = READ_ONCE(r->sq.tail); @@ -155,14 +154,15 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) * thread termination. */ if (tsk) { + u64 usec; + get_task_struct(tsk); rcu_read_unlock(); - getrusage(tsk, RUSAGE_SELF, &sq_usage); + usec = io_sq_cpu_usec(tsk); put_task_struct(tsk); sq_pid = sq->task_pid; sq_cpu = sq->sq_cpu; - sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000 - + sq_usage.ru_stime.tv_usec); + sq_total_time = usec; sq_work_time = sq->work_time; } else { rcu_read_unlock(); diff --git a/io_uring/filetable.c b/io_uring/filetable.c index 997c56d32ee6c..6183d61c7222d 100644 --- a/io_uring/filetable.c +++ b/io_uring/filetable.c @@ -62,7 +62,7 @@ void io_free_file_tables(struct io_file_table *table) static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file, u32 slot_index) - __must_hold(&req->ctx->uring_lock) + __must_hold(&ctx->uring_lock) { struct io_fixed_file *file_slot; int ret; diff --git a/io_uring/rw.c b/io_uring/rw.c index 3ad104cf1e7d8..996cd4bec4821 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -477,7 +477,7 @@ static void io_req_io_end(struct io_kiocb *req) static bool __io_complete_rw_common(struct io_kiocb *req, long res) { if (unlikely(res != req->cqe.res)) { - if (res == -EAGAIN && io_rw_should_reissue(req)) { + if ((res == -EOPNOTSUPP || res == -EAGAIN) && io_rw_should_reissue(req)) { /* * Reissue will start accounting again, finish the * current cycle. 
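	 * (Editor's note: -EOPNOTSUPP is treated like -EAGAIN here on
	 * the understanding that some lower layers return it for
	 * IOCB_NOWAIT I/O they cannot service without blocking, so the
	 * request is reissued from a context where blocking is
	 * allowed.)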
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 2faa3058b2d0e..44e7959b52d94 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -175,7 +176,38 @@ static inline bool io_sqd_events_pending(struct io_sq_data *sqd) return READ_ONCE(sqd->state); } -static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) +struct io_sq_time { + bool started; + u64 usec; +}; + +u64 io_sq_cpu_usec(struct task_struct *tsk) +{ + u64 utime, stime; + + task_cputime_adjusted(tsk, &utime, &stime); + do_div(stime, 1000); + return stime; +} + +static void io_sq_update_worktime(struct io_sq_data *sqd, struct io_sq_time *ist) +{ + if (!ist->started) + return; + ist->started = false; + sqd->work_time += io_sq_cpu_usec(current) - ist->usec; +} + +static void io_sq_start_worktime(struct io_sq_time *ist) +{ + if (ist->started) + return; + ist->started = true; + ist->usec = io_sq_cpu_usec(current); +} + +static int __io_sq_thread(struct io_ring_ctx *ctx, struct io_sq_data *sqd, + bool cap_entries, struct io_sq_time *ist) { unsigned int to_submit; int ret = 0; @@ -188,6 +220,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) if (to_submit || !wq_list_empty(&ctx->iopoll_list)) { const struct cred *creds = NULL; + io_sq_start_worktime(ist); + if (ctx->sq_creds != current_cred()) creds = override_creds(ctx->sq_creds); @@ -261,23 +295,11 @@ static bool io_sq_tw_pending(struct llist_node *retry_list) return retry_list || !llist_empty(&tctx->task_list); } -static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start) -{ - struct rusage end; - - getrusage(current, RUSAGE_SELF, &end); - end.ru_stime.tv_sec -= start->ru_stime.tv_sec; - end.ru_stime.tv_usec -= start->ru_stime.tv_usec; - - sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000; -} - static int io_sq_thread(void *data) { struct llist_node *retry_list = NULL; struct io_sq_data *sqd = data; struct io_ring_ctx *ctx; - struct rusage start; unsigned long timeout = 0; char buf[TASK_COMM_LEN]; DEFINE_WAIT(wait); @@ -315,6 +337,7 @@ static int io_sq_thread(void *data) mutex_lock(&sqd->lock); while (1) { bool cap_entries, sqt_spin = false; + struct io_sq_time ist = { }; if (io_sqd_events_pending(sqd) || signal_pending(current)) { if (io_sqd_handle_event(sqd)) @@ -323,9 +346,8 @@ static int io_sq_thread(void *data) } cap_entries = !list_is_singular(&sqd->ctx_list); - getrusage(current, RUSAGE_SELF, &start); list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { - int ret = __io_sq_thread(ctx, cap_entries); + int ret = __io_sq_thread(ctx, sqd, cap_entries, &ist); if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list))) sqt_spin = true; @@ -333,15 +355,18 @@ static int io_sq_thread(void *data) if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE)) sqt_spin = true; - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) - if (io_napi(ctx)) + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { + if (io_napi(ctx)) { + io_sq_start_worktime(&ist); io_napi_sqpoll_busy_poll(ctx); + } + } + + io_sq_update_worktime(sqd, &ist); if (sqt_spin || !time_after(jiffies, timeout)) { - if (sqt_spin) { - io_sq_update_worktime(sqd, &start); + if (sqt_spin) timeout = jiffies + sqd->sq_thread_idle; - } if (unlikely(need_resched())) { mutex_unlock(&sqd->lock); cond_resched(); diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h index b83dcdec9765f..fd2f6f29b516e 100644 --- a/io_uring/sqpoll.h +++ b/io_uring/sqpoll.h @@ -29,6 +29,7 @@ void 
io_sq_thread_unpark(struct io_sq_data *sqd); void io_put_sq_data(struct io_sq_data *sqd); void io_sqpoll_wait_sq(struct io_ring_ctx *ctx); int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask); +u64 io_sq_cpu_usec(struct task_struct *tsk); static inline struct task_struct *sqpoll_task_locked(struct io_sq_data *sqd) { diff --git a/io_uring/waitid.c b/io_uring/waitid.c index 2f7b5eeab845e..ecaa358d0ad87 100644 --- a/io_uring/waitid.c +++ b/io_uring/waitid.c @@ -272,13 +272,14 @@ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode, if (!pid_child_should_wake(wo, p)) return 0; + list_del_init(&wait->entry); + /* cancel is in progress */ if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK) return 1; req->io_task_work.func = io_waitid_cb; io_req_task_work_add(req); - list_del_init(&wait->entry); return 1; } diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 9380e0fd5e4af..08bdb623f4f91 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2326,6 +2326,7 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map, map->owner->type = prog_type; map->owner->jited = fp->jited; map->owner->xdp_has_frags = aux->xdp_has_frags; + map->owner->expected_attach_type = fp->expected_attach_type; map->owner->attach_func_proto = aux->attach_func_proto; for_each_cgroup_storage_type(i) { map->owner->storage_cookie[i] = @@ -2337,6 +2338,10 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map, ret = map->owner->type == prog_type && map->owner->jited == fp->jited && map->owner->xdp_has_frags == aux->xdp_has_frags; + if (ret && + map->map_type == BPF_MAP_TYPE_PROG_ARRAY && + map->owner->expected_attach_type != fp->expected_attach_type) + ret = false; for_each_cgroup_storage_type(i) { if (!ret) break; @@ -2953,7 +2958,10 @@ EXPORT_SYMBOL_GPL(bpf_event_output); /* Always built-in helper functions. */ const struct bpf_func_proto bpf_tail_call_proto = { - .func = NULL, + /* func is unused for tail_call, we set it to pass the + * get_helper_proto check + */ + .func = BPF_PTR_POISON, .gpl_only = false, .ret_type = RET_VOID, .arg1_type = ARG_PTR_TO_CTX, diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 9aaf5124648bd..746b5644d9a19 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -775,7 +775,7 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root) return 0; } -static void bpf_free_inode(struct inode *inode) +static void bpf_destroy_inode(struct inode *inode) { enum bpf_type type; @@ -790,7 +790,7 @@ const struct super_operations bpf_super_ops = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, .show_options = bpf_show_options, - .free_inode = bpf_free_inode, + .destroy_inode = bpf_destroy_inode, }; enum { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 24ae8f33e5d76..96640a80fd9c4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7799,6 +7799,10 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno, verbose(env, "verifier bug. Two map pointers in a timer helper\n"); return -EFAULT; } + if (IS_ENABLED(CONFIG_PREEMPT_RT)) { + verbose(env, "bpf_timer cannot be used for PREEMPT_RT.\n"); + return -EOPNOTSUPP; + } meta->map_uid = reg->map_uid; meta->map_ptr = map; return 0; @@ -10465,7 +10469,7 @@ static int get_helper_proto(struct bpf_verifier_env *env, int func_id, return -EINVAL; *ptr = env->ops->get_func_proto(func_id, env->prog); - return *ptr ? 0 : -EINVAL; + return *ptr && (*ptr)->func ? 
0 : -EINVAL; } static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, @@ -14541,7 +14545,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } else { /* all other ALU ops: and, sub, xor, add, ... */ if (BPF_SRC(insn->code) == BPF_X) { - if (insn->imm != 0 || insn->off > 1 || + if (insn->imm != 0 || (insn->off != 0 && insn->off != 1) || (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; @@ -14551,7 +14555,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) if (err) return err; } else { - if (insn->src_reg != BPF_REG_0 || insn->off > 1 || + if (insn->src_reg != BPF_REG_0 || (insn->off != 0 && insn->off != 1) || (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 25f9565f798d4..13eb986172499 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1679,11 +1679,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, if (prstate_housekeeping_conflict(new_prs, xcpus)) return PERR_HKEEPING; - /* - * A parent can be left with no CPU as long as there is no - * task directly associated with the parent partition. - */ - if (nocpu) + if (tasks_nocpu_error(parent, cs, xcpus)) return PERR_NOCPUS; deleting = cpumask_and(tmp->delmask, xcpus, parent->effective_xcpus); diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 39972e834e7a1..035dda07ab0d0 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "debug.h" @@ -594,7 +595,9 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs) if (rc == -ENOMEM) { pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n"); global_disable = true; - } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { + } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && + is_swiotlb_active(entry->dev))) { err_printk(entry->dev, entry, "cacheline tracking EEXIST, overlapping mappings aren't supported\n"); } diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 8a47e52a454f4..49d87e6db553f 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -223,6 +223,10 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, struct perf_callchain_entry_ctx ctx; int rctx, start_entry_idx; + /* crosstask is not supported for user stacks */ + if (crosstask && user && !kernel) + return NULL; + entry = get_callchain_entry(&rctx); if (!entry) return NULL; @@ -239,18 +243,15 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, perf_callchain_kernel(&ctx, regs); } - if (user) { + if (user && !crosstask) { if (!user_mode(regs)) { - if (current->mm) - regs = task_pt_regs(current); - else + if (current->flags & (PF_KTHREAD | PF_USER_WORKER)) regs = NULL; + else + regs = task_pt_regs(current); } if (regs) { - if (crosstask) - goto exit_put; - if (add_mark) perf_callchain_store_context(&ctx, PERF_CONTEXT_USER); @@ -260,7 +261,6 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, } } -exit_put: put_callchain_entry(rctx); return entry; diff --git a/kernel/events/core.c b/kernel/events/core.c index 3cc06ffb60c1b..d6a86d8e9e59b 100644 --- a/kernel/events/core.c +++ 
b/kernel/events/core.c @@ -7095,7 +7095,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user, if (user_mode(regs)) { regs_user->abi = perf_reg_abi(current); regs_user->regs = regs; - } else if (!(current->flags & PF_KTHREAD)) { + } else if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) { perf_get_regs_user(regs_user, regs); } else { regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; @@ -7735,7 +7735,7 @@ static u64 perf_virt_to_phys(u64 virt) * Try IRQ-safe get_user_page_fast_only first. * If failed, leave phys_addr as 0. */ - if (current->mm != NULL) { + if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) { struct page *p; pagefault_disable(); @@ -7847,7 +7847,8 @@ struct perf_callchain_entry * perf_callchain(struct perf_event *event, struct pt_regs *regs) { bool kernel = !event->attr.exclude_callchain_kernel; - bool user = !event->attr.exclude_callchain_user; + bool user = !event->attr.exclude_callchain_user && + !(current->flags & (PF_KTHREAD | PF_USER_WORKER)); /* Disallow cross-task user callchains. */ bool crosstask = event->ctx->task && event->ctx->task != current; const u32 max_stack = event->attr.sample_max_stack; @@ -8997,7 +8998,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) flags |= MAP_HUGETLB; if (file) { - struct inode *inode; + const struct inode *inode; dev_t dev; buf = kmalloc(PATH_MAX, GFP_KERNEL); @@ -9010,12 +9011,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) * need to add enough zero bytes after the string to handle * the 64bit alignment we do later. */ - name = file_path(file, buf, PATH_MAX - sizeof(u64)); + name = d_path(file_user_path(file), buf, PATH_MAX - sizeof(u64)); if (IS_ERR(name)) { name = "//toolong"; goto cpy_name; } - inode = file_inode(vma->vm_file); + inode = file_user_inode(vma->vm_file); dev = inode->i_sb->s_dev; ino = inode->i_ino; gen = inode->i_generation; @@ -9086,7 +9087,7 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter, if (!filter->path.dentry) return false; - if (d_inode(filter->path.dentry) != file_inode(file)) + if (d_inode(filter->path.dentry) != file_user_inode(file)) return false; if (filter->offset > offset + size) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index e60f5e71e35df..c00981cc6fe5b 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -114,7 +114,7 @@ struct xol_area { static void uprobe_warn(struct task_struct *t, const char *msg) { - pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg); + pr_warn("uprobe: %s:%d failed to %s\n", t->comm, t->pid, msg); } /* diff --git a/kernel/fork.c b/kernel/fork.c index 97c9afe3efc38..e5ec098a6f61e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1807,7 +1807,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct *tsk, return 0; } -static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) +static int copy_sighand(u64 clone_flags, struct task_struct *tsk) { struct sighand_struct *sig; diff --git a/kernel/futex/requeue.c b/kernel/futex/requeue.c index b47bb764b3520..559aae55792c6 100644 --- a/kernel/futex/requeue.c +++ b/kernel/futex/requeue.c @@ -225,18 +225,20 @@ static inline void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, struct futex_hash_bucket *hb) { - q->key = *key; + struct task_struct *task; + q->key = *key; __futex_unqueue(q); WARN_ON(!q->rt_waiter); q->rt_waiter = NULL; q->lock_ptr = &hb->lock; + task = READ_ONCE(q->task); /* Signal locked state to the waiter */ 
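	/*
	 * (Editor's note: q->task is sampled above with READ_ONCE()
	 * because once the waiter observes the locked state it may
	 * return and its stack-resident futex_q can vanish; the cached
	 * pointer keeps the wake_up_state() target stable.)
	 */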
futex_requeue_pi_complete(q, 1); - wake_up_state(q->task, TASK_NORMAL); + wake_up_state(task, TASK_NORMAL); } /** diff --git a/kernel/padata.c b/kernel/padata.c index 3e0ef0753e73e..c3810f5bd7156 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -290,7 +290,11 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd, if (remove_object) { list_del_init(&padata->list); ++pd->processed; - pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false); + /* When sequence wraps around, reset to the first CPU. */ + if (unlikely(pd->processed == 0)) + pd->cpu = cpumask_first(pd->cpumask.pcpu); + else + pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false); } spin_unlock(&reorder->lock); diff --git a/kernel/pid.c b/kernel/pid.c index 2715afb77eab8..b80c3bfb58d07 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -487,7 +487,7 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) struct upid *upid; pid_t nr = 0; - if (pid && ns->level <= pid->level) { + if (pid && ns && ns->level <= pid->level) { upid = &pid->numbers[ns->level]; if (upid->ns == ns) nr = upid->nr; diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 1c9fe741fe6d5..d839b564522f6 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -722,11 +722,24 @@ static int em_recalc_and_update(struct device *dev, struct em_perf_domain *pd, * Adjustment of CPU performance values after boot, when all CPUs capacites * are correctly calculated. */ -static void em_adjust_new_capacity(struct device *dev, - struct em_perf_domain *pd, - u64 max_cap) +static void em_adjust_new_capacity(unsigned int cpu, struct device *dev, + struct em_perf_domain *pd) { + unsigned long cpu_capacity = arch_scale_cpu_capacity(cpu); struct em_perf_table *em_table; + struct em_perf_state *table; + unsigned long em_max_perf; + + rcu_read_lock(); + table = em_perf_state_from_pd(pd); + em_max_perf = table[pd->nr_perf_states - 1].performance; + rcu_read_unlock(); + + if (em_max_perf == cpu_capacity) + return; + + pr_debug("updating cpu%d cpu_cap=%lu old capacity=%lu\n", cpu, + cpu_capacity, em_max_perf); em_table = em_table_dup(pd); if (!em_table) { @@ -742,10 +755,7 @@ static void em_adjust_new_capacity(struct device *dev, static void em_check_capacity_update(void) { cpumask_var_t cpu_done_mask; - struct em_perf_state *table; - struct em_perf_domain *pd; - unsigned long cpu_capacity; - int cpu; + int cpu, failed_cpus = 0; if (!zalloc_cpumask_var(&cpu_done_mask, GFP_KERNEL)) { pr_warn("no free memory\n"); @@ -755,7 +765,7 @@ static void em_check_capacity_update(void) /* Check if CPUs capacity has changed than update EM */ for_each_possible_cpu(cpu) { struct cpufreq_policy *policy; - unsigned long em_max_perf; + struct em_perf_domain *pd; struct device *dev; if (cpumask_test_cpu(cpu, cpu_done_mask)) @@ -763,41 +773,25 @@ static void em_check_capacity_update(void) policy = cpufreq_cpu_get(cpu); if (!policy) { - pr_debug("Accessing cpu%d policy failed\n", cpu); - schedule_delayed_work(&em_update_work, - msecs_to_jiffies(1000)); - break; + failed_cpus++; + continue; } cpufreq_cpu_put(policy); - pd = em_cpu_get(cpu); + dev = get_cpu_device(cpu); + pd = em_pd_get(dev); if (!pd || em_is_artificial(pd)) continue; cpumask_or(cpu_done_mask, cpu_done_mask, em_span_cpus(pd)); - cpu_capacity = arch_scale_cpu_capacity(cpu); - - rcu_read_lock(); - table = em_perf_state_from_pd(pd); - em_max_perf = table[pd->nr_perf_states - 1].performance; - rcu_read_unlock(); - - /* - * Check if the CPU capacity has been adjusted 
during boot - * and trigger the update for new performance values. - */ - if (em_max_perf == cpu_capacity) - continue; - - pr_debug("updating cpu%d cpu_cap=%lu old capacity=%lu\n", - cpu, cpu_capacity, em_max_perf); - - dev = get_cpu_device(cpu); - em_adjust_new_capacity(dev, pd, cpu_capacity); + em_adjust_new_capacity(cpu, dev, pd); } + if (failed_cpus) + schedule_delayed_work(&em_update_work, msecs_to_jiffies(1000)); + free_cpumask_var(cpu_done_mask); } diff --git a/kernel/rseq.c b/kernel/rseq.c index 23894ba8250cf..810005f927d7c 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -255,12 +255,12 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags) /* * Load and clear event mask atomically with respect to - * scheduler preemption. + * scheduler preemption and membarrier IPIs. */ - preempt_disable(); - event_mask = t->rseq_event_mask; - t->rseq_event_mask = 0; - preempt_enable(); + scoped_guard(RSEQ_EVENT_GUARD) { + event_mask = t->rseq_event_mask; + t->rseq_event_mask = 0; + } return !!event_mask; } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 53e3670fbb1e0..6ec66fef3f91e 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2617,6 +2617,25 @@ static int find_later_rq(struct task_struct *task) return -1; } +static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) +{ + struct task_struct *p; + + if (!has_pushable_dl_tasks(rq)) + return NULL; + + p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); + + WARN_ON_ONCE(rq->cpu != task_cpu(p)); + WARN_ON_ONCE(task_current(rq, p)); + WARN_ON_ONCE(p->nr_cpus_allowed <= 1); + + WARN_ON_ONCE(!task_on_rq_queued(p)); + WARN_ON_ONCE(!dl_task(p)); + + return p; +} + /* Locks the rq it finds */ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) { @@ -2644,12 +2663,37 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) /* Retry if something changed. */ if (double_lock_balance(rq, later_rq)) { - if (unlikely(task_rq(task) != rq || + /* + * double_lock_balance had to release rq->lock, in the + * meantime, task may no longer be fit to be migrated. + * Check the following to ensure that the task is + * still suitable for migration: + * 1. It is possible the task was scheduled, + * migrate_disabled was set and then got preempted, + * so we must check the task migration disable + * flag. + * 2. The CPU picked is in the task's affinity. + * 3. For throttled task (dl_task_offline_migration), + * check the following: + * - the task is not on the rq anymore (it was + * migrated) + * - the task is not on CPU anymore + * - the task is still a dl task + * - the task is not queued on the rq anymore + * 4. For the non-throttled task (push_dl_task), the + * check to ensure that this task is still at the + * head of the pushable tasks list is enough. 
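+	 *    (Editor's note: pick_next_pushable_dl_task() is moved
+	 *    above this function in this patch precisely so that
+	 *    check 4 can reuse its head-of-tree lookup.)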
+ */ + if (unlikely(is_migration_disabled(task) || !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) || - task_on_cpu(rq, task) || - !dl_task(task) || - is_migration_disabled(task) || - !task_on_rq_queued(task))) { + (task->dl.dl_throttled && + (task_rq(task) != rq || + task_on_cpu(rq, task) || + !dl_task(task) || + !task_on_rq_queued(task))) || + (!task->dl.dl_throttled && + task != pick_next_pushable_dl_task(rq)))) { + double_unlock_balance(rq, later_rq); later_rq = NULL; break; @@ -2672,25 +2716,6 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) return later_rq; } -static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) -{ - struct task_struct *p; - - if (!has_pushable_dl_tasks(rq)) - return NULL; - - p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); - - WARN_ON_ONCE(rq->cpu != task_cpu(p)); - WARN_ON_ONCE(task_current(rq, p)); - WARN_ON_ONCE(p->nr_cpus_allowed <= 1); - - WARN_ON_ONCE(!task_on_rq_queued(p)); - WARN_ON_ONCE(!dl_task(p)); - - return p; -} - /* * See if the non running -deadline tasks on this rq * can be sent to some other CPU where they can preempt diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5baacc7b3b290..748667d5155ef 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7187,6 +7187,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) int h_nr_delayed = 0; struct cfs_rq *cfs_rq; u64 slice = 0; + int ret = 0; if (entity_is_task(se)) { p = task_of(se); @@ -7218,7 +7219,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) /* end evaluation on encountering a throttled cfs_rq */ if (cfs_rq_throttled(cfs_rq)) - return 0; + goto out; /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { @@ -7261,7 +7262,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) /* end evaluation on encountering a throttled cfs_rq */ if (cfs_rq_throttled(cfs_rq)) - return 0; + goto out; } sub_nr_running(rq, h_nr_queued); @@ -7273,6 +7274,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) if (unlikely(!was_sched_idle && sched_idle_rq(rq))) rq->next_balance = jiffies; + ret = 1; +out: if (p && task_delayed) { SCHED_WARN_ON(!task_sleep); SCHED_WARN_ON(p->on_rq != 1); @@ -7288,7 +7291,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) __block_task(rq, p); } - return 1; + return ret; } /* @@ -9056,21 +9059,21 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf return p; idle: - if (!rf) - return NULL; - - new_tasks = sched_balance_newidle(rq, rf); + if (rf) { + new_tasks = sched_balance_newidle(rq, rf); - /* - * Because sched_balance_newidle() releases (and re-acquires) rq->lock, it is - * possible for any higher priority task to appear. In that case we - * must re-start the pick_next_entity() loop. - */ - if (new_tasks < 0) - return RETRY_TASK; + /* + * Because sched_balance_newidle() releases (and re-acquires) + * rq->lock, it is possible for any higher priority task to + * appear. In that case we must re-start the pick_next_entity() + * loop. 
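+		 * (Editor's note: when rf is NULL, as in the simple
+		 * pick paths that call pick_next_task_fair(rq, NULL,
+		 * NULL), no newidle balance can be attempted and
+		 * control falls through to the idle handling below.)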
+ */ + if (new_tasks < 0) + return RETRY_TASK; - if (new_tasks > 0) - goto again; + if (new_tasks > 0) + goto again; + } /* * rq is about to be idle, check if we need to update the diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 91d5dfeeb1dec..ab82550248bae 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3732,11 +3732,9 @@ static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm) static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm) { struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; - struct cpumask *cpumask; int cid; lockdep_assert_rq_held(rq); - cpumask = mm_cidmask(mm); cid = __this_cpu_read(pcpu_cid->cid); if (mm_cid_is_valid(cid)) { mm_cid_snapshot_time(rq, mm); diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 0cd1f8b5a102e..1eac0d2b8ecbe 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -733,6 +733,26 @@ seccomp_prepare_user_filter(const char __user *user_filter) } #ifdef SECCOMP_ARCH_NATIVE +static bool seccomp_uprobe_exception(struct seccomp_data *sd) +{ +#if defined __NR_uretprobe || defined __NR_uprobe +#ifdef SECCOMP_ARCH_COMPAT + if (sd->arch == SECCOMP_ARCH_NATIVE) +#endif + { +#ifdef __NR_uretprobe + if (sd->nr == __NR_uretprobe) + return true; +#endif +#ifdef __NR_uprobe + if (sd->nr == __NR_uprobe) + return true; +#endif + } +#endif + return false; +} + /** * seccomp_is_const_allow - check if filter is constant allow with given data * @fprog: The BPF programs @@ -750,13 +770,8 @@ static bool seccomp_is_const_allow(struct sock_fprog_kern *fprog, return false; /* Our single exception to filtering. */ -#ifdef __NR_uretprobe -#ifdef SECCOMP_ARCH_COMPAT - if (sd->arch == SECCOMP_ARCH_NATIVE) -#endif - if (sd->nr == __NR_uretprobe) - return true; -#endif + if (seccomp_uprobe_exception(sd)) + return true; for (pc = 0; pc < fprog->len; pc++) { struct sock_filter *insn = &fprog->filter[pc]; @@ -1034,6 +1049,9 @@ static const int mode1_syscalls[] = { __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn, #ifdef __NR_uretprobe __NR_uretprobe, +#endif +#ifdef __NR_uprobe + __NR_uprobe, #endif -1, /* negative terminated */ }; @@ -1124,7 +1142,7 @@ static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd, struct seccomp_kn static bool should_sleep_killable(struct seccomp_filter *match, struct seccomp_knotif *n) { - return match->wait_killable_recv && n->state == SECCOMP_NOTIFY_SENT; + return match->wait_killable_recv && n->state >= SECCOMP_NOTIFY_SENT; } static int seccomp_do_user_notification(int this_syscall, @@ -1171,13 +1189,11 @@ static int seccomp_do_user_notification(int this_syscall, if (err != 0) { /* - * Check to see if the notifcation got picked up and - * whether we should switch to wait killable. + * Check to see whether we should switch to wait + * killable. Only return the interrupted error if not. */ - if (!wait_killable && should_sleep_killable(match, &n)) - continue; - - goto interrupted; + if (!(!wait_killable && should_sleep_killable(match, &n))) + goto interrupted; } addfd = list_first_entry_or_null(&n.addfd, diff --git a/kernel/smp.c b/kernel/smp.c index f25e20617b7eb..fa6faf50fb43b 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -891,16 +891,15 @@ static void smp_call_function_many_cond(const struct cpumask *mask, * @mask: The set of cpus to run on (only runs on online subset). * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. - * @wait: Bitmask that controls the operation. 
If %SCF_WAIT is set, wait - * (atomically) until function has completed on other CPUs. If - * %SCF_RUN_LOCAL is set, the function will also be run locally - * if the local CPU is set in the @cpumask. - * - * If @wait is true, then returns once @func has returned. + * @wait: If true, wait (atomically) until function has completed + * on other CPUs. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. Preemption * must be disabled when calling this function. + * + * @func is not called on the local CPU even if @mask contains it. Consider + * using on_each_cpu_cond_mask() instead if this is not desirable. */ void smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) diff --git a/kernel/sys.c b/kernel/sys.c index 4da31f28fda81..35990f0796bca 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1698,6 +1698,7 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource, struct rlimit old, new; struct task_struct *tsk; unsigned int checkflags = 0; + bool need_tasklist; int ret; if (old_rlim) @@ -1724,8 +1725,25 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource, get_task_struct(tsk); rcu_read_unlock(); - ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL, - old_rlim ? &old : NULL); + need_tasklist = !same_thread_group(tsk, current); + if (need_tasklist) { + /* + * Ensure we can't race with group exit or de_thread(), + * so tsk->group_leader can't be freed or changed until + * read_unlock(tasklist_lock) below. + */ + read_lock(&tasklist_lock); + if (!pid_alive(tsk)) + ret = -ESRCH; + } + + if (!ret) { + ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL, + old_rlim ? &old : NULL); + } + + if (need_tasklist) + read_unlock(&tasklist_lock); if (!ret && old_rlim) { rlim_to_rlim64(&old, &old64); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 3ec7df7dbeec4..4a44451efbcc6 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2759,19 +2759,24 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, struct bpf_run_ctx *old_run_ctx; int err; + /* + * graph tracer framework ensures we won't migrate, so there is no need + * to use migrate_disable for bpf_prog_run again. The check here just for + * __this_cpu_inc_return. 
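+	 * (Editor's note: the cant_sleep() annotation below warns if
+	 * this path is ever entered from a preemptible, and therefore
+	 * migratable, context, so dropping the migrate_disable() /
+	 * migrate_enable() pair cannot regress silently.)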
+ */ + cant_sleep(); + if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { bpf_prog_inc_misses_counter(link->link.prog); err = 0; goto out; } - migrate_disable(); rcu_read_lock(); old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx); err = bpf_prog_run(link->link.prog, regs); bpf_reset_run_ctx(old_run_ctx); rcu_read_unlock(); - migrate_enable(); out: __this_cpu_dec(bpf_prog_active); diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index c9b0533407ede..76737492e750e 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -239,6 +239,10 @@ static int dyn_event_open(struct inode *inode, struct file *file) { int ret; + ret = security_locked_down(LOCKDOWN_TRACEFS); + if (ret) + return ret; + ret = tracing_check_open_get_tr(NULL); if (ret) return ret; diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c index af7d6e2060d9d..440dbfa6bbfd5 100644 --- a/kernel/trace/trace_fprobe.c +++ b/kernel/trace/trace_fprobe.c @@ -343,12 +343,14 @@ static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip, void *entry_data) { struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp); + unsigned int flags = trace_probe_load_flag(&tf->tp); int ret = 0; - if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE)) + if (flags & TP_FLAG_TRACE) fentry_trace_func(tf, entry_ip, regs); + #ifdef CONFIG_PERF_EVENTS - if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE)) + if (flags & TP_FLAG_PROFILE) ret = fentry_perf_func(tf, entry_ip, regs); #endif return ret; @@ -360,11 +362,12 @@ static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip, void *entry_data) { struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp); + unsigned int flags = trace_probe_load_flag(&tf->tp); - if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE)) + if (flags & TP_FLAG_TRACE) fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data); #ifdef CONFIG_PERF_EVENTS - if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE)) + if (flags & TP_FLAG_PROFILE) fexit_perf_func(tf, entry_ip, ret_ip, regs, entry_data); #endif } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 6b9c3f3f870f4..b273611c5026c 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1799,14 +1799,15 @@ static int kprobe_register(struct trace_event_call *event, static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) { struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp); + unsigned int flags = trace_probe_load_flag(&tk->tp); int ret = 0; raw_cpu_inc(*tk->nhit); - if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE)) + if (flags & TP_FLAG_TRACE) kprobe_trace_func(tk, regs); #ifdef CONFIG_PERF_EVENTS - if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE)) + if (flags & TP_FLAG_PROFILE) ret = kprobe_perf_func(tk, regs); #endif return ret; @@ -1818,6 +1819,7 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) { struct kretprobe *rp = get_kretprobe(ri); struct trace_kprobe *tk; + unsigned int flags; /* * There is a small chance that get_kretprobe(ri) returns NULL when @@ -1830,10 +1832,11 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) tk = container_of(rp, struct trace_kprobe, rp); raw_cpu_inc(*tk->nhit); - if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE)) + flags = trace_probe_load_flag(&tk->tp); + if (flags & TP_FLAG_TRACE) kretprobe_trace_func(tk, ri, regs); #ifdef CONFIG_PERF_EVENTS - if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE)) 
+ if (flags & TP_FLAG_PROFILE) kretprobe_perf_func(tk, ri, regs); #endif return 0; /* We don't tweak kernel, so just return 0 */ diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 8a6797c2278d9..4f54f7935d5db 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -269,16 +269,21 @@ struct event_file_link { struct list_head list; }; +static inline unsigned int trace_probe_load_flag(struct trace_probe *tp) +{ + return smp_load_acquire(&tp->event->flags); +} + static inline bool trace_probe_test_flag(struct trace_probe *tp, unsigned int flag) { - return !!(tp->event->flags & flag); + return !!(trace_probe_load_flag(tp) & flag); } static inline void trace_probe_set_flag(struct trace_probe *tp, unsigned int flag) { - tp->event->flags |= flag; + smp_store_release(&tp->event->flags, tp->event->flags | flag); } static inline void trace_probe_clear_flag(struct trace_probe *tp, diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 9916677acf24e..f210e71bc1550 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1531,6 +1531,7 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) struct trace_uprobe *tu; struct uprobe_dispatch_data udd; struct uprobe_cpu_buffer *ucb = NULL; + unsigned int flags; int ret = 0; tu = container_of(con, struct trace_uprobe, consumer); @@ -1545,11 +1546,12 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) if (WARN_ON_ONCE(!uprobe_cpu_buffer)) return 0; - if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE)) + flags = trace_probe_load_flag(&tu->tp); + if (flags & TP_FLAG_TRACE) ret |= uprobe_trace_func(tu, regs, &ucb); #ifdef CONFIG_PERF_EVENTS - if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE)) + if (flags & TP_FLAG_PROFILE) ret |= uprobe_perf_func(tu, regs, &ucb); #endif uprobe_buffer_put(ucb); @@ -1562,6 +1564,7 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, struct trace_uprobe *tu; struct uprobe_dispatch_data udd; struct uprobe_cpu_buffer *ucb = NULL; + unsigned int flags; tu = container_of(con, struct trace_uprobe, consumer); @@ -1573,11 +1576,12 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, if (WARN_ON_ONCE(!uprobe_cpu_buffer)) return 0; - if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE)) + flags = trace_probe_load_flag(&tu->tp); + if (flags & TP_FLAG_TRACE) uretprobe_trace_func(tu, func, regs, &ucb); #ifdef CONFIG_PERF_EVENTS - if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE)) + if (flags & TP_FLAG_PROFILE) uretprobe_perf_func(tu, func, regs, &ucb); #endif uprobe_buffer_put(ucb); diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c index 2f844c279a3e0..7f24ccc896c64 100644 --- a/kernel/vhost_task.c +++ b/kernel/vhost_task.c @@ -100,6 +100,7 @@ void vhost_task_stop(struct vhost_task *vtsk) * freeing it below. 
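	 * (Editor's note: vtsk->task now also carries a reference
	 * taken in vhost_task_create() below, so the task_struct
	 * remains valid until the put_task_struct() added here.)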
*/ wait_for_completion(&vtsk->exited); + put_task_struct(vtsk->task); kfree(vtsk); } EXPORT_SYMBOL_GPL(vhost_task_stop); @@ -148,7 +149,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), return ERR_PTR(PTR_ERR(tsk)); } - vtsk->task = tsk; + vtsk->task = get_task_struct(tsk); return vtsk; } EXPORT_SYMBOL_GPL(vhost_task_create); diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 969baab8c805f..7ccc18accb235 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -33,6 +33,10 @@ obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519-generic.o libcurve25519-generic-y := curve25519-fiat32.o libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o libcurve25519-generic-y += curve25519-generic.o +# clang versions prior to 18 may blow out the stack with KASAN +ifeq ($(call clang-min-version, 180000),) +KASAN_SANITIZE_curve25519-hacl64.o := n +endif obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o libcurve25519-y += curve25519.o diff --git a/lib/genalloc.c b/lib/genalloc.c index 4fa5635bf81bd..841f297838333 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -899,8 +899,11 @@ struct gen_pool *of_gen_pool_get(struct device_node *np, if (!name) name = of_node_full_name(np_pool); } - if (pdev) + if (pdev) { pool = gen_pool_get(&pdev->dev, name); + put_device(&pdev->dev); + } + of_node_put(np_pool); return pool; diff --git a/localversion-rt b/localversion-rt index 9f7d0bdbffb18..08b3e75841adc 100644 --- a/localversion-rt +++ b/localversion-rt @@ -1 +1 @@ --rt13 +-rt14 diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c index c2b4f0b071472..5654e31a198a4 100644 --- a/mm/damon/lru_sort.c +++ b/mm/damon/lru_sort.c @@ -203,7 +203,7 @@ static int damon_lru_sort_apply_parameters(void) goto out; } - err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs); + err = damon_set_attrs(param_ctx, &damon_lru_sort_mon_attrs); if (err) goto out; diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index dba3b2f4d7581..52e5c244bd021 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -324,10 +324,8 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, } pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); - if (!pte) { - walk->action = ACTION_AGAIN; + if (!pte) return 0; - } if (!pte_present(ptep_get(pte))) goto out; damon_ptep_mkold(pte, walk->vma, addr); @@ -479,10 +477,8 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr, #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); - if (!pte) { - walk->action = ACTION_AGAIN; + if (!pte) return 0; - } ptent = ptep_get(pte); if (!pte_present(ptent)) goto out; diff --git a/mm/gup.c b/mm/gup.c index e9be7c49542a0..d105817a0c9aa 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2356,8 +2356,8 @@ static unsigned long collect_longterm_unpinnable_folios( struct pages_or_folios *pofs) { unsigned long collected = 0; - bool drain_allow = true; struct folio *folio; + int drained = 0; long i = 0; for (folio = pofs_get_folio(pofs, i); folio; @@ -2376,10 +2376,17 @@ static unsigned long collect_longterm_unpinnable_folios( continue; } - if (drain_allow && folio_ref_count(folio) != - folio_expected_ref_count(folio) + 1) { + if (drained == 0 && folio_may_be_lru_cached(folio) && + folio_ref_count(folio) != + folio_expected_ref_count(folio) + 1) { + lru_add_drain(); + drained = 1; + } + if (drained == 1 && folio_may_be_lru_cached(folio) && + folio_ref_count(folio) != + folio_expected_ref_count(folio) + 1) { lru_add_drain_all(); - drain_allow = false; + drained = 2; } if 
(!folio_isolate_lru(folio)) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f94a9d4135855..92df29fc44fdc 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3715,32 +3715,26 @@ static unsigned long deferred_split_count(struct shrinker *shrink, static bool thp_underused(struct folio *folio) { int num_zero_pages = 0, num_filled_pages = 0; - void *kaddr; int i; if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1) return false; + if (folio_contain_hwpoisoned_page(folio)) + return false; + for (i = 0; i < folio_nr_pages(folio); i++) { - kaddr = kmap_local_folio(folio, i * PAGE_SIZE); - if (!memchr_inv(kaddr, 0, PAGE_SIZE)) { - num_zero_pages++; - if (num_zero_pages > khugepaged_max_ptes_none) { - kunmap_local(kaddr); + if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) { + if (++num_zero_pages > khugepaged_max_ptes_none) return true; - } } else { /* * Another path for early exit once the number * of non-zero filled pages exceeds threshold. */ - num_filled_pages++; - if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) { - kunmap_local(kaddr); + if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) return false; - } } - kunmap_local(kaddr); } return false; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f116af53a9392..d404532ae41e2 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3453,6 +3453,9 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) initialized = true; } + if (!h->max_huge_pages) + return; + /* do node specific alloc */ if (hugetlb_hstate_alloc_pages_specific_nodes(h)) return; @@ -6886,6 +6889,8 @@ long hugetlb_change_protection(struct vm_area_struct *vma, psize); } spin_unlock(ptl); + + cond_resched(); } /* * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c index a495debf14363..abb79a6c07691 100644 --- a/mm/kmsan/core.c +++ b/mm/kmsan/core.c @@ -195,7 +195,8 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b, u32 origin, bool checked) { u64 address = (u64)addr; - u32 *shadow_start, *origin_start; + void *shadow_start; + u32 *aligned_shadow, *origin_start; size_t pad = 0; KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size)); @@ -214,9 +215,12 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b, } __memset(shadow_start, b, size); - if (!IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) { + if (IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) { + aligned_shadow = shadow_start; + } else { pad = address % KMSAN_ORIGIN_SIZE; address -= pad; + aligned_shadow = shadow_start - pad; size += pad; } size = ALIGN(size, KMSAN_ORIGIN_SIZE); @@ -230,7 +234,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b, * corresponding shadow slot is zero. */ for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) { - if (origin || !shadow_start[i]) + if (origin || !aligned_shadow[i]) origin_start[i] = origin; } } diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c index 13236d579ebaa..c95a8e72e4967 100644 --- a/mm/kmsan/kmsan_test.c +++ b/mm/kmsan/kmsan_test.c @@ -556,6 +556,21 @@ DEFINE_TEST_MEMSETXX(16) DEFINE_TEST_MEMSETXX(32) DEFINE_TEST_MEMSETXX(64) +/* Test case: ensure that KMSAN does not access shadow memory out of bounds. 
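+ * (Editor's note: vmalloc() areas are followed by unmapped guard
+ * pages, so an out-of-bounds access to the corresponding shadow
+ * faults loudly instead of corrupting adjacent memory.)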
*/ +static void test_memset_on_guarded_buffer(struct kunit *test) +{ + void *buf = vmalloc(PAGE_SIZE); + + kunit_info(test, + "memset() on ends of guarded buffer should not crash\n"); + + for (size_t size = 0; size <= 128; size++) { + memset(buf, 0xff, size); + memset(buf + PAGE_SIZE - size, 0xff, size); + } + vfree(buf); +} + static noinline void fibonacci(int *array, int size, int start) { if (start < 2 || (start == size)) @@ -661,6 +676,7 @@ static struct kunit_case kmsan_test_cases[] = { KUNIT_CASE(test_memset16), KUNIT_CASE(test_memset32), KUNIT_CASE(test_memset64), + KUNIT_CASE(test_memset_on_guarded_buffer), KUNIT_CASE(test_long_origin_chain), KUNIT_CASE(test_stackdepot_roundtrip), KUNIT_CASE(test_unpoison_memory), diff --git a/mm/migrate.c b/mm/migrate.c index 8619aa884eaa8..bc6d5aeec718f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -198,19 +198,17 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list) } static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw, - struct folio *folio, - unsigned long idx) + struct folio *folio, pte_t old_pte, unsigned long idx) { struct page *page = folio_page(folio, idx); - bool contains_data; pte_t newpte; - void *addr; - if (PageCompound(page)) + if (PageCompound(page) || PageHWPoison(page)) return false; + VM_BUG_ON_PAGE(!PageAnon(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page); + VM_BUG_ON_PAGE(pte_present(old_pte), page); if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) || mm_forbids_zeropage(pvmw->vma->vm_mm)) @@ -221,15 +219,17 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw, * this subpage has been non present. If the subpage is only zero-filled * then map it to the shared zeropage. 
*/ - addr = kmap_local_page(page); - contains_data = memchr_inv(addr, 0, PAGE_SIZE); - kunmap_local(addr); - - if (contains_data) + if (!pages_identical(page, ZERO_PAGE(0))) return false; newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address), pvmw->vma->vm_page_prot)); + + if (pte_swp_soft_dirty(old_pte)) + newpte = pte_mksoft_dirty(newpte); + if (pte_swp_uffd_wp(old_pte)) + newpte = pte_mkuffd_wp(newpte); + set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte); dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio)); @@ -272,13 +272,13 @@ static bool remove_migration_pte(struct folio *folio, continue; } #endif + old_pte = ptep_get(pvmw.pte); if (rmap_walk_arg->map_unused_to_zeropage && - try_to_map_unused_to_zeropage(&pvmw, folio, idx)) + try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx)) continue; folio_get(folio); pte = mk_pte(new, READ_ONCE(vma->vm_page_prot)); - old_pte = ptep_get(pvmw.pte); entry = pte_to_swp_entry(old_pte); if (!is_migration_entry_young(entry)) diff --git a/mm/mlock.c b/mm/mlock.c index cde076fa7d5e5..8c8d522efdd59 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -255,7 +255,7 @@ void mlock_folio(struct folio *folio) folio_get(folio); if (!folio_batch_add(fbatch, mlock_lru(folio)) || - folio_test_large(folio) || lru_cache_disabled()) + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } @@ -278,7 +278,7 @@ void mlock_new_folio(struct folio *folio) folio_get(folio); if (!folio_batch_add(fbatch, mlock_new(folio)) || - folio_test_large(folio) || lru_cache_disabled()) + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } @@ -299,7 +299,7 @@ void munlock_folio(struct folio *folio) */ folio_get(folio); if (!folio_batch_add(fbatch, folio) || - folio_test_large(folio) || lru_cache_disabled()) + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 752576749db9d..765c890e6a843 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4052,7 +4052,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) if (!(gfp_mask & __GFP_NOMEMALLOC)) { alloc_flags |= ALLOC_NON_BLOCK; - if (order > 0) + if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE)) alloc_flags |= ALLOC_HIGHATOMIC; } diff --git a/mm/slab.h b/mm/slab.h index 92ca5ff203753..b65d2462b3fdb 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -572,8 +572,12 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab) unsigned long obj_exts = READ_ONCE(slab->obj_exts); #ifdef CONFIG_MEMCG - VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS), - slab_page(slab)); + /* + * obj_exts should be either NULL, a valid pointer with + * MEMCG_DATA_OBJEXTS bit set or be equal to OBJEXTS_ALLOC_FAIL. 
+ */ + VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS) && + obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab)); VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab)); #endif return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK); diff --git a/mm/slub.c b/mm/slub.c index 7fbba36f7aac5..64fdd1d122b92 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1941,9 +1941,9 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) } } -static inline void mark_failed_objexts_alloc(struct slab *slab) +static inline bool mark_failed_objexts_alloc(struct slab *slab) { - slab->obj_exts = OBJEXTS_ALLOC_FAIL; + return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0; } static inline void handle_failed_objexts_alloc(unsigned long obj_exts, @@ -1965,7 +1965,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts, #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} -static inline void mark_failed_objexts_alloc(struct slab *slab) {} +static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; } static inline void handle_failed_objexts_alloc(unsigned long obj_exts, struct slabobj_ext *vec, unsigned int objects) {} @@ -1998,9 +1998,14 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, slab_nid(slab)); if (!vec) { - /* Mark vectors which failed to allocate */ - if (new_slab) - mark_failed_objexts_alloc(slab); + /* + * Try to mark vectors which failed to allocate. + * If this operation fails, there may be a racing process + * that has already completed the allocation. + */ + if (!mark_failed_objexts_alloc(slab) && + slab_obj_exts(slab)) + return 0; return -ENOMEM; } @@ -2009,6 +2014,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, #ifdef CONFIG_MEMCG new_exts |= MEMCG_DATA_OBJEXTS; #endif +retry: old_exts = READ_ONCE(slab->obj_exts); handle_failed_objexts_alloc(old_exts, vec, objects); if (new_slab) { @@ -2018,8 +2024,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, * be simply assigned. */ slab->obj_exts = new_exts; - } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) || - cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { + } else if (old_exts & ~OBJEXTS_FLAGS_MASK) { /* * If the slab is already in use, somebody can allocate and * assign slabobj_exts in parallel. In this case the existing @@ -2028,6 +2033,9 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, mark_objexts_empty(vec); kfree(vec); return 0; + } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { + /* Retry if a racing thread changed slab->obj_exts from under us. */ + goto retry; } kmemleak_not_leak(vec); @@ -2039,8 +2047,15 @@ static inline void free_slab_obj_exts(struct slab *slab) struct slabobj_ext *obj_exts; obj_exts = slab_obj_exts(slab); - if (!obj_exts) + if (!obj_exts) { + /* + * If obj_exts allocation failed, slab->obj_exts is set to + * OBJEXTS_ALLOC_FAIL. In this case, we end up here and should + * clear the flag. 
+ */ + slab->obj_exts = 0; return; + } /* * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its diff --git a/mm/swap.c b/mm/swap.c index 59f30a981c6f9..ff846915db454 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -195,6 +195,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) for (i = 0; i < folio_batch_count(fbatch); i++) { struct folio *folio = fbatch->folios[i]; + /* block memcg migration while the folio moves between lru */ + if (move_fn != lru_add && !folio_test_clear_lru(folio)) + continue; + folio_lruvec_relock_irqsave(folio, &lruvec, &flags); move_fn(lruvec, folio); @@ -207,14 +211,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) } static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch, - struct folio *folio, move_fn_t move_fn, - bool on_lru, bool disable_irq) + struct folio *folio, move_fn_t move_fn, bool disable_irq) { unsigned long flags; - if (on_lru && !folio_test_clear_lru(folio)) - return; - folio_get(folio); if (disable_irq) @@ -222,8 +222,8 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch, else local_lock(&cpu_fbatches.lock); - if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) || - lru_cache_disabled()) + if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn); if (disable_irq) @@ -232,13 +232,13 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch, local_unlock(&cpu_fbatches.lock); } -#define folio_batch_add_and_move(folio, op, on_lru) \ - __folio_batch_add_and_move( \ - &cpu_fbatches.op, \ - folio, \ - op, \ - on_lru, \ - offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \ +#define folio_batch_add_and_move(folio, op) \ + __folio_batch_add_and_move( \ + &cpu_fbatches.op, \ + folio, \ + op, \ + offsetof(struct cpu_fbatches, op) >= \ + offsetof(struct cpu_fbatches, lock_irq) \ ) static void lru_move_tail(struct lruvec *lruvec, struct folio *folio) @@ -262,10 +262,10 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio) void folio_rotate_reclaimable(struct folio *folio) { if (folio_test_locked(folio) || folio_test_dirty(folio) || - folio_test_unevictable(folio)) + folio_test_unevictable(folio) || !folio_test_lru(folio)) return; - folio_batch_add_and_move(folio, lru_move_tail, true); + folio_batch_add_and_move(folio, lru_move_tail); } void lru_note_cost(struct lruvec *lruvec, bool file, @@ -354,10 +354,11 @@ static void folio_activate_drain(int cpu) void folio_activate(struct folio *folio) { - if (folio_test_active(folio) || folio_test_unevictable(folio)) + if (folio_test_active(folio) || folio_test_unevictable(folio) || + !folio_test_lru(folio)) return; - folio_batch_add_and_move(folio, lru_activate, true); + folio_batch_add_and_move(folio, lru_activate); } #else @@ -510,7 +511,7 @@ void folio_add_lru(struct folio *folio) lru_gen_in_fault() && !(current->flags & PF_MEMALLOC)) folio_set_active(folio); - folio_batch_add_and_move(folio, lru_add, false); + folio_batch_add_and_move(folio, lru_add); } EXPORT_SYMBOL(folio_add_lru); @@ -685,10 +686,10 @@ void lru_add_drain_cpu(int cpu) void deactivate_file_folio(struct folio *folio) { /* Deactivating an unevictable folio will not accelerate reclaim */ - if (folio_test_unevictable(folio)) + if (folio_test_unevictable(folio) || !folio_test_lru(folio)) return; - folio_batch_add_and_move(folio, 
lru_deactivate_file, true); + folio_batch_add_and_move(folio, lru_deactivate_file); } /* @@ -701,10 +702,11 @@ void deactivate_file_folio(struct folio *folio) */ void folio_deactivate(struct folio *folio) { - if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled())) + if (folio_test_unevictable(folio) || !folio_test_lru(folio) || + !(folio_test_active(folio) || lru_gen_enabled())) return; - folio_batch_add_and_move(folio, lru_deactivate, true); + folio_batch_add_and_move(folio, lru_deactivate); } /** @@ -717,10 +719,11 @@ void folio_deactivate(struct folio *folio) void folio_mark_lazyfree(struct folio *folio) { if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) || + !folio_test_lru(folio) || folio_test_swapcache(folio) || folio_test_unevictable(folio)) return; - folio_batch_add_and_move(folio, lru_lazyfree, true); + folio_batch_add_and_move(folio, lru_lazyfree); } void lru_add_drain(void) diff --git a/mm/swapfile.c b/mm/swapfile.c index c02493d9c7bee..883333a87a45f 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2337,6 +2337,8 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type) VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); + if (check_stable_address_space(mm)) + goto unlock; for_each_vma(vmi, vma) { if (vma->anon_vma && !is_vm_hugetlb_page(vma)) { ret = unuse_vma(vma, type); @@ -2346,6 +2348,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type) cond_resched(); } +unlock: mmap_read_unlock(mm); return ret; } diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 791e4868f2d4e..7e9d731c45976 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -725,10 +725,10 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); spin_lock(&m->req_lock); - /* Ignore cancelled request if message has been received - * before lock. 
- */ - if (req->status == REQ_STATUS_RCVD) { spin_unlock(&m->req_lock); return 0; } diff --git a/net/9p/trans_usbg.c b/net/9p/trans_usbg.c index 6b694f117aef2..468f7e8f0277b 100644 --- a/net/9p/trans_usbg.c +++ b/net/9p/trans_usbg.c @@ -231,6 +231,8 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req) struct f_usb9pfs *usb9pfs = ep->driver_data; struct usb_composite_dev *cdev = usb9pfs->function.config->cdev; struct p9_req_t *p9_rx_req; + unsigned int req_size = req->actual; + int status = REQ_STATUS_RCVD; if (req->status) { dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n", @@ -242,11 +244,19 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req) if (!p9_rx_req) return; - memcpy(p9_rx_req->rc.sdata, req->buf, req->actual); + if (req_size > p9_rx_req->rc.capacity) { + dev_err(&cdev->gadget->dev, + "%s received data size %u exceeds buffer capacity %zu\n", + ep->name, req_size, p9_rx_req->rc.capacity); + req_size = 0; + status = REQ_STATUS_ERROR; + } + + memcpy(p9_rx_req->rc.sdata, req->buf, req_size); - p9_rx_req->rc.size = req->actual; + p9_rx_req->rc.size = req_size; - p9_client_cb(usb9pfs->client, p9_rx_req, REQ_STATUS_RCVD); + p9_client_cb(usb9pfs->client, p9_rx_req, status); p9_req_put(usb9pfs->client, p9_rx_req); complete(&usb9pfs->received); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 262ff30261d67..1e537ed83ba4b 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3050,8 +3050,18 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, hci_dev_lock(hdev); + /* Check for existing connection: + * + * 1. If it doesn't exist then it must be receiver/slave role. + * 2. If it does exist confirm that it is connecting/BT_CONNECT in case + * of initiator/master role since there could be a collision where + * either side is attempting to connect or something like a fuzzing + * test is trying to play tricks to destroy the hcon object before + * it even attempts to connect (e.g. hcon->state == BT_OPEN). + */ conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); - if (!conn) { + if (!conn || + (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) { /* In case of error status and there is no connection pending * just unlock as there is nothing to cleanup. */ @@ -5618,8 +5628,18 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, */ hci_dev_clear_flag(hdev, HCI_LE_ADV); - conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); - if (!conn) { + /* Check for existing connection: + * + * 1. If it doesn't exist then use the role to create a new object. + * 2. If it does exist confirm that it is connecting/BT_CONNECT in case + * of initiator/master role since there could be a collision where + * either side is attempting to connect or something like a fuzzing + * test is trying to play tricks to destroy the hcon object before + * it even attempts to connect (e.g. hcon->state == BT_OPEN). + */ + conn = hci_conn_hash_lookup_role(hdev, LE_LINK, role, bdaddr); + if (!conn || + (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) { /* In case of error status and there is no connection pending * just unlock as there is nothing to cleanup.
*/ diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 5f5137764b80a..853acfa8e9433 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -1325,7 +1325,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_ext_adv_params cp; struct hci_rp_le_set_ext_adv_params rp; - bool connectable; + bool connectable, require_privacy; u32 flags; bdaddr_t random_addr; u8 own_addr_type; @@ -1363,10 +1363,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) return -EPERM; /* Set require_privacy to true only when non-connectable - * advertising is used. In that case it is fine to use a - * non-resolvable private address. + * advertising is used and it is not periodic. + * In that case it is fine to use a non-resolvable private address. */ - err = hci_get_random_address(hdev, !connectable, + require_privacy = !connectable && !(adv && adv->periodic); + + err = hci_get_random_address(hdev, require_privacy, adv_use_rpa(hdev, flags), adv, &own_addr_type, &random_addr); if (err < 0) @@ -2604,6 +2606,13 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev) hci_remove_ext_adv_instance_sync(hdev, adv->instance, NULL); } + + /* If current advertising instance is set to instance 0x00 + * then we need to re-enable it. + */ + if (!hdev->cur_adv_instance) + err = hci_enable_ext_advertising_sync(hdev, + hdev->cur_adv_instance); } else { /* Schedule for most recent instance to be restarted and begin * the software rotation loop diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index a08a0f3d5003c..2cd0b963c96bd 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -111,6 +111,8 @@ static void iso_conn_free(struct kref *ref) /* Ensure no more work items will run since hci_conn has been dropped */ disable_delayed_work_sync(&conn->timeout_work); + kfree_skb(conn->rx_skb); + kfree(conn); } @@ -743,6 +745,13 @@ static void iso_sock_kill(struct sock *sk) BT_DBG("sk %p state %d", sk, sk->sk_state); + /* Sock is dead, so set conn->sk to NULL to avoid possible UAF */ + if (iso_pi(sk)->conn) { + iso_conn_lock(iso_pi(sk)->conn); + iso_pi(sk)->conn->sk = NULL; + iso_conn_unlock(iso_pi(sk)->conn); + } + /* Kill poor orphan */ bt_sock_unlink(&iso_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); @@ -2295,7 +2304,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len -= skb->len; - return; + break; case ISO_END: skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 8b75647076bae..563cae4f76b0d 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -4412,13 +4412,11 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, return -ENOMEM; #ifdef CONFIG_BT_FEATURE_DEBUG - if (!hdev) { - flags = bt_dbg_get() ? BIT(0) : 0; + flags = bt_dbg_get() ? 
BIT(0) : 0; - memcpy(rp->features[idx].uuid, debug_uuid, 16); - rp->features[idx].flags = cpu_to_le32(flags); - idx++; - } + memcpy(rp->features[idx].uuid, debug_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; #endif if (hdev && hci_dev_le_state_simultaneous(hdev)) { diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index f2efb58d152bc..13d6c3f51c29d 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -1455,7 +1455,7 @@ void br_vlan_fill_forward_path_pvid(struct net_bridge *br, if (!br_opt_get(br, BROPT_VLAN_ENABLED)) return; - vg = br_vlan_group(br); + vg = br_vlan_group_rcu(br); if (idx >= 0 && ctx->vlan[idx].proto == br->vlan_proto) { diff --git a/net/core/dst.c b/net/core/dst.c index cc990706b6451..9a0ddef8bee43 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -150,7 +150,7 @@ void dst_dev_put(struct dst_entry *dst) dst->ops->ifdown(dst, dev); WRITE_ONCE(dst->input, dst_discard); WRITE_ONCE(dst->output, dst_discard_out); - dst->dev = blackhole_netdev; + WRITE_ONCE(dst->dev, blackhole_netdev); netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker, GFP_ATOMIC); } @@ -263,7 +263,7 @@ unsigned int dst_blackhole_mtu(const struct dst_entry *dst) { unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); - return mtu ? : dst->dev->mtu; + return mtu ? : dst_dev(dst)->mtu; } EXPORT_SYMBOL_GPL(dst_blackhole_mtu); diff --git a/net/core/filter.c b/net/core/filter.c index 02fedc404d7f7..fef4d85fee008 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2289,6 +2289,7 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(dst)) goto out_drop; + skb_dst_drop(skb); skb_dst_set(skb, dst); } else if (nh->nh_family != AF_INET6) { goto out_drop; @@ -2397,6 +2398,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, goto out_drop; } + skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); } @@ -9233,13 +9235,17 @@ static bool sock_addr_is_valid_access(int off, int size, return false; info->reg_type = PTR_TO_SOCKET; break; - default: - if (type == BPF_READ) { - if (size != size_default) - return false; - } else { + case bpf_ctx_range(struct bpf_sock_addr, user_family): + case bpf_ctx_range(struct bpf_sock_addr, family): + case bpf_ctx_range(struct bpf_sock_addr, type): + case bpf_ctx_range(struct bpf_sock_addr, protocol): + if (type != BPF_READ) return false; - } + if (size != size_default) + return false; + break; + default: + return false; } return true; diff --git a/net/core/page_pool.c b/net/core/page_pool.c index b1c3e0ad6dbf4..6a7d740b396f6 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -462,11 +462,60 @@ page_pool_dma_sync_for_device(const struct page_pool *pool, } } +static int page_pool_register_dma_index(struct page_pool *pool, + netmem_ref netmem, gfp_t gfp) +{ + int err = 0; + u32 id; + + if (unlikely(!PP_DMA_INDEX_BITS)) + goto out; + + if (in_softirq()) + err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem), + PP_DMA_INDEX_LIMIT, gfp); + else + err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem), + PP_DMA_INDEX_LIMIT, gfp); + if (err) { + WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@"); + goto out; + } + + netmem_set_dma_index(netmem, id); +out: + return err; +} + +static int page_pool_release_dma_index(struct page_pool *pool, + netmem_ref netmem) +{ + struct page *old, *page = netmem_to_page(netmem); + unsigned long id; + + if (unlikely(!PP_DMA_INDEX_BITS)) + return 0; + + id = netmem_get_dma_index(netmem); + if 
(!id) + return -1; + + if (in_softirq()) + old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0); + else + old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0); + if (old != page) + return -1; + + netmem_set_dma_index(netmem, 0); + + return 0; +} + static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp) { dma_addr_t dma; int err; - u32 id; /* Setup DMA mapping: use 'struct page' area for storing DMA-addr * since dma_addr_t can be either 32 or 64 bits and does not always fit @@ -485,18 +534,10 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t g goto unmap_failed; } - if (in_softirq()) - err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem), - PP_DMA_INDEX_LIMIT, gfp); - else - err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem), - PP_DMA_INDEX_LIMIT, gfp); - if (err) { - WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@"); + err = page_pool_register_dma_index(pool, netmem, gfp); + if (err) goto unset_failed; - } - netmem_set_dma_index(netmem, id); page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len); return true; @@ -669,8 +710,6 @@ void page_pool_clear_pp_info(netmem_ref netmem) static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, netmem_ref netmem) { - struct page *old, *page = netmem_to_page(netmem); - unsigned long id; dma_addr_t dma; if (!pool->dma_map) @@ -679,15 +718,7 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, */ return; - id = netmem_get_dma_index(netmem); - if (!id) - return; - - if (in_softirq()) - old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0); - else - old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0); - if (old != page) + if (page_pool_release_dma_index(pool, netmem)) return; dma = page_pool_get_dma_addr_netmem(netmem); @@ -697,7 +728,6 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, PAGE_SIZE << pool->p.order, pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); page_pool_set_dma_addr_netmem(netmem, 0); - netmem_set_dma_index(netmem, 0); } /* Disconnects a page (from a page_pool). API users can have a need diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4d0ee1c9002aa..650c3c20e79ff 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -4414,9 +4414,6 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, int err; u16 vid; - if (!netlink_capable(skb, CAP_NET_ADMIN)) - return -EPERM; - if (!del_bulk) { err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index cf54593149cce..6a92c03ee6f42 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -6603,7 +6603,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, return NULL; while (data_len) { - if (nr_frags == MAX_SKB_FRAGS - 1) + if (nr_frags == MAX_SKB_FRAGS) goto failure; while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) order--; diff --git a/net/core/sock.c b/net/core/sock.c index d392cb37a864f..a5f248a914042 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2534,8 +2534,8 @@ static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst) !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)); #endif /* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */ - max_size = is_ipv6 ? READ_ONCE(dst->dev->gso_max_size) : - READ_ONCE(dst->dev->gso_ipv4_max_size); + max_size = is_ipv6 ? 
READ_ONCE(dst_dev(dst)->gso_max_size) : + READ_ONCE(dst_dev(dst)->gso_ipv4_max_size); if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk)) max_size = GSO_LEGACY_MAX_SIZE; @@ -2546,9 +2546,13 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { u32 max_segs = 1; - sk->sk_route_caps = dst->dev->features; - if (sk_is_tcp(sk)) + sk->sk_route_caps = dst_dev(dst)->features; + if (sk_is_tcp(sk)) { + struct inet_connection_sock *icsk = inet_csk(sk); + sk->sk_route_caps |= NETIF_F_GSO; + icsk->icsk_ack.dst_quick_ack = dst_metric(dst, RTAX_QUICKACK); + } if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; if (unlikely(sk->sk_gso_disabled)) @@ -2560,7 +2564,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst); /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */ - max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1); + max_segs = max_t(u32, READ_ONCE(dst_dev(dst)->gso_max_segs), 1); } } sk->sk_gso_max_segs = max_segs; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 8f11870b77377..508b23204edc5 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -311,18 +311,20 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, { struct dst_entry *dst = &rt->dst; struct inet_peer *peer; + struct net_device *dev; bool rc = true; if (!apply_ratelimit) return true; /* No rate limit on loopback */ - if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) + dev = dst_dev(dst); + if (dev && (dev->flags & IFF_LOOPBACK)) goto out; rcu_read_lock(); peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, - l3mdev_master_ifindex_rcu(dst->dev)); + l3mdev_master_ifindex_rcu(dev)); rc = inet_peer_xrlim_allow(peer, READ_ONCE(net->ipv4.sysctl_icmp_ratelimit)); rcu_read_unlock(); @@ -468,13 +470,13 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) */ static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb) { - struct net_device *route_lookup_dev = NULL; + struct net_device *dev = skb->dev; + const struct dst_entry *dst; - if (skb->dev) - route_lookup_dev = skb->dev; - else if (skb_dst(skb)) - route_lookup_dev = skb_dst(skb)->dev; - return route_lookup_dev; + if (dev) + return dev; + dst = skb_dst(skb); + return dst ? dst_dev(dst) : NULL; } static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, @@ -873,7 +875,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb) struct net *net; u32 info = 0; - net = dev_net_rcu(skb_dst(skb)->dev); + net = skb_dst_dev_net_rcu(skb); /* * Incomplete header ? @@ -1016,7 +1018,7 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb) struct icmp_bxm icmp_param; struct net *net; - net = dev_net_rcu(skb_dst(skb)->dev); + net = skb_dst_dev_net_rcu(skb); /* should there be an ICMP stat for ignored echos? 
*/ if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all)) return SKB_NOT_DROPPED_YET; @@ -1186,7 +1188,7 @@ static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb) return SKB_NOT_DROPPED_YET; out_err: - __ICMP_INC_STATS(dev_net_rcu(skb_dst(skb)->dev), ICMP_MIB_INERRORS); + __ICMP_INC_STATS(skb_dst_dev_net_rcu(skb), ICMP_MIB_INERRORS); return SKB_DROP_REASON_PKT_TOO_SMALL; } diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 9bf09de6a2e77..f4a87b90351e9 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -424,7 +424,7 @@ static int igmpv3_sendpack(struct sk_buff *skb) pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen); - return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); + return ip_local_out(skb_dst_dev_net(skb), skb->sk, skb); } static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel) diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 9ca0a183a55ff..183856b0b7409 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -488,7 +488,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, /* Process an incoming IP datagram fragment. */ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user) { - struct net_device *dev = skb->dev ? : skb_dst(skb)->dev; + struct net_device *dev = skb->dev ? : skb_dst_dev(skb); int vif = l3mdev_master_ifindex_rcu(dev); struct ipq *qp; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 49811c9281d42..0bda561aa8a89 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -117,7 +117,7 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) skb->protocol = htons(ETH_P_IP); return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, - net, sk, skb, NULL, skb_dst(skb)->dev, + net, sk, skb, NULL, skb_dst_dev(skb), dst_output); } @@ -200,7 +200,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s { struct dst_entry *dst = skb_dst(skb); struct rtable *rt = dst_rtable(dst); - struct net_device *dev = dst->dev; + struct net_device *dev = dst_dev(dst); unsigned int hh_len = LL_RESERVED_SPACE(dev); struct neighbour *neigh; bool is_v6gw = false; @@ -426,15 +426,20 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb) int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev; + struct net_device *dev, *indev = skb->dev; + int ret_val; + rcu_read_lock(); + dev = skb_dst_dev_rcu(skb); skb->dev = dev; skb->protocol = htons(ETH_P_IP); - return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, - net, sk, skb, indev, dev, - ip_finish_output, - !(IPCB(skb)->flags & IPSKB_REROUTED)); + ret_val = NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, + net, sk, skb, indev, dev, + ip_finish_output, + !(IPCB(skb)->flags & IPSKB_REROUTED)); + rcu_read_unlock(); + return ret_val; } EXPORT_SYMBOL(ip_output); diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 09b73acf037ae..7c77d06372d19 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -567,20 +567,6 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, return 0; } -static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom) -{ - /* we must cap headroom to some upperlimit, else pskb_expand_head - * will overflow header offsets in skb_headers_offset_update(). 
- */ - static const unsigned int max_allowed = 512; - - if (headroom > max_allowed) - headroom = max_allowed; - - if (headroom > READ_ONCE(dev->needed_headroom)) - WRITE_ONCE(dev->needed_headroom, headroom); -} - void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto, int tunnel_hlen) { diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index f0b4419cef349..fc95161663071 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -229,7 +229,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, goto tx_error_icmp; } - tdev = dst->dev; + tdev = dst_dev(dst); if (tdev == dev) { dst_release(dst); @@ -259,7 +259,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, xmit: skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); skb_dst_set(skb, dst); - skb->dev = skb_dst(skb)->dev; + skb->dev = skb_dst_dev(skb); err = dst_output(tunnel->net, skb->sk, skb); if (net_xmit_eval(err) == 0) diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index e0aab66cd9251..dff06b9eb6607 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -20,12 +20,12 @@ /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type) { + struct net_device *dev = skb_dst_dev(skb); const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; __u8 flags; - struct net_device *dev = skb_dst(skb)->dev; struct flow_keys flkeys; unsigned int hh_len; @@ -74,7 +74,7 @@ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, un #endif /* Change in oif may mean change in hh_len. */ - hh_len = skb_dst(skb)->dev->hard_header_len; + hh_len = skb_dst_dev(skb)->hard_header_len; if (skb_headroom(skb) < hh_len && pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)), 0, GFP_ATOMIC)) diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 93aaea0006ba7..c52ff9364ae8d 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -2375,6 +2375,13 @@ static int replace_nexthop_single(struct net *net, struct nexthop *old, return -EINVAL; } + if (!list_empty(&old->grp_list) && + rtnl_dereference(new->nh_info)->fdb_nh != + rtnl_dereference(old->nh_info)->fdb_nh) { + NL_SET_ERR_MSG(extack, "Cannot change nexthop FDB status while in a group"); + return -EINVAL; + } + err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack); if (err) return err; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 619ddc087957f..37a3fa98d904f 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -77,6 +77,7 @@ static inline struct hlist_head *ping_hashslot(struct ping_table *table, int ping_get_port(struct sock *sk, unsigned short ident) { + struct net *net = sock_net(sk); struct inet_sock *isk, *isk2; struct hlist_head *hlist; struct sock *sk2 = NULL; @@ -90,9 +91,10 @@ int ping_get_port(struct sock *sk, unsigned short ident) for (i = 0; i < (1L << 16); i++, result++) { if (!result) result++; /* avoid zero */ - hlist = ping_hashslot(&ping_table, sock_net(sk), - result); + hlist = ping_hashslot(&ping_table, net, result); sk_for_each(sk2, hlist) { + if (!net_eq(sock_net(sk2), net)) + continue; isk2 = inet_sk(sk2); if (isk2->inet_num == result) @@ -108,8 +110,10 @@ int ping_get_port(struct sock *sk, unsigned short ident) if (i >= (1L << 16)) goto fail; } else { - hlist = ping_hashslot(&ping_table, sock_net(sk), ident); + hlist = ping_hashslot(&ping_table, net, ident); 
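/*
 * Both lookup paths in this hunk gain the same namespace guard: a
 * ping_table bucket can hold sockets from several network namespaces,
 * so every bucket walk now skips entries whose netns differs before
 * comparing idents. The guard in isolation (identifiers as in the
 * surrounding hunk, the rest of the loop body elided):
 *
 *	sk_for_each(sk2, hlist) {
 *		if (!net_eq(sock_net(sk2), net))
 *			continue;
 *		...
 *	}
 *
 * Without it, a socket bound to the same ident in another namespace
 * would be reported as a port conflict.
 */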
sk_for_each(sk2, hlist) { + if (!net_eq(sock_net(sk2), net)) + continue; isk2 = inet_sk(sk2); /* BUG? Why is this reuse and not reuseaddr? ping.c @@ -129,7 +133,7 @@ int ping_get_port(struct sock *sk, unsigned short ident) pr_debug("was not hashed\n"); sk_add_node_rcu(sk, hlist); sock_set_flag(sk, SOCK_RCU_FREE); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); + sock_prot_inuse_add(net, sk->sk_prot, 1); } spin_unlock(&ping_table.lock); return 0; @@ -188,6 +192,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) } sk_for_each_rcu(sk, hslot) { + if (!net_eq(sock_net(sk), net)) + continue; isk = inet_sk(sk); pr_debug("iterate\n"); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 261ddb6542a40..7d04df4fc6608 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -413,7 +413,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr) { const struct rtable *rt = container_of(dst, struct rtable, dst); - struct net_device *dev = dst->dev; + struct net_device *dev = dst_dev(dst); struct neighbour *n; rcu_read_lock(); @@ -440,7 +440,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr) { const struct rtable *rt = container_of(dst, struct rtable, dst); - struct net_device *dev = dst->dev; + struct net_device *dev = dst_dev(dst); const __be32 *pkey = daddr; if (rt->rt_gw_family == AF_INET) { @@ -1025,7 +1025,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) return; rcu_read_lock(); - net = dev_net_rcu(dst->dev); + net = dev_net_rcu(dst_dev(dst)); if (mtu < net->ipv4.ip_rt_min_pmtu) { lock = true; mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu); @@ -1323,7 +1323,7 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst) struct net *net; rcu_read_lock(); - net = dev_net_rcu(dst->dev); + net = dev_net_rcu(dst_dev(dst)); advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size, net->ipv4.ip_rt_min_advmss); rcu_read_unlock(); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 988992ff898b3..795ffa62cc0e6 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1735,6 +1735,7 @@ EXPORT_SYMBOL(tcp_peek_len); /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ int tcp_set_rcvlowat(struct sock *sk, int val) { + struct tcp_sock *tp = tcp_sk(sk); int space, cap; if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) @@ -1753,7 +1754,9 @@ int tcp_set_rcvlowat(struct sock *sk, int val) space = tcp_space_from_win(sk, val); if (space > sk->sk_rcvbuf) { WRITE_ONCE(sk->sk_rcvbuf, space); - WRITE_ONCE(tcp_sk(sk)->window_clamp, val); + + if (tp->window_clamp && tp->window_clamp < val) + WRITE_ONCE(tp->window_clamp, val); } return 0; } @@ -3058,8 +3061,8 @@ bool tcp_check_oom(const struct sock *sk, int shift) void __tcp_close(struct sock *sk, long timeout) { + bool data_was_unread = false; struct sk_buff *skb; - int data_was_unread = 0; int state; WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); @@ -3078,11 +3081,12 @@ void __tcp_close(struct sock *sk, long timeout) * reader process may not have drained the data yet! 
*/ while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { - u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; + u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) - len--; - data_was_unread += len; + end_seq--; + if (after(end_seq, tcp_sk(sk)->copied_seq)) + data_was_unread = true; __kfree_skb(skb); } diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 408985eb74eef..86c995dc1c5e5 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -558,6 +558,7 @@ bool tcp_fastopen_active_should_disable(struct sock *sk) void tcp_fastopen_active_disable_ofo_check(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); + struct net_device *dev; struct dst_entry *dst; struct sk_buff *skb; @@ -575,7 +576,8 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk) } else if (tp->syn_fastopen_ch && atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) { dst = sk_dst_get(sk); - if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK))) + dev = dst ? dst_dev(dst) : NULL; + if (!(dev && (dev->flags & IFF_LOOPBACK))) atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0); dst_release(dst); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 30f4375f8431b..1d9e93a04930b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -331,9 +331,8 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) static bool tcp_in_quickack_mode(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); - const struct dst_entry *dst = __sk_dst_get(sk); - return (dst && dst_metric(dst, RTAX_QUICKACK)) || + return icsk->icsk_ack.dst_quick_ack || (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk)); } @@ -7338,7 +7337,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, &foc, TCP_SYNACK_FASTOPEN, skb); /* Add the child socket directly into the accept queue */ if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) { - reqsk_fastopen_remove(fastopen_sk, req, false); bh_unlock_sock(fastopen_sk); sock_put(fastopen_sk); goto drop_and_free; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 824048679e1b8..1572562b0498c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -494,14 +494,14 @@ int tcp_v4_err(struct sk_buff *skb, u32 info) { const struct iphdr *iph = (const struct iphdr *)skb->data; struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); - struct tcp_sock *tp; + struct net *net = dev_net_rcu(skb->dev); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; - struct sock *sk; struct request_sock *fastopen; + struct tcp_sock *tp; u32 seq, snd_una; + struct sock *sk; int err; - struct net *net = dev_net(skb->dev); sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, iph->daddr, th->dest, iph->saddr, @@ -786,7 +786,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb, arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); - net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); + net = sk ? 
sock_net(sk) : skb_dst_dev_net_rcu(skb); /* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(tcp_hdr(skb), &md5_hash_location, &aoh)) @@ -1965,7 +1965,7 @@ EXPORT_SYMBOL(tcp_v4_do_rcv); int tcp_v4_early_demux(struct sk_buff *skb) { - struct net *net = dev_net(skb->dev); + struct net *net = dev_net_rcu(skb->dev); const struct iphdr *iph; const struct tcphdr *th; struct sock *sk; @@ -2176,7 +2176,7 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph, int tcp_v4_rcv(struct sk_buff *skb) { - struct net *net = dev_net(skb->dev); + struct net *net = dev_net_rcu(skb->dev); enum skb_drop_reason drop_reason; int sdif = inet_sdif(skb); int dif = inet_iif(skb); diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 95669935494ef..03c068ea27b6a 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -166,11 +166,11 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, unsigned int hash) { struct tcp_metrics_block *tm; - struct net *net; bool reclaim = false; + struct net *net; spin_lock_bh(&tcp_metrics_lock); - net = dev_net(dst->dev); + net = dev_net_rcu(dst_dev(dst)); /* While waiting for the spin-lock the cache might have been populated * with this entry and so we have to check again. @@ -273,7 +273,7 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req, return NULL; } - net = dev_net(dst->dev); + net = dev_net_rcu(dst_dev(dst)); hash ^= net_hash_mix(net); hash = hash_32(hash, tcp_metrics_hash_log); @@ -318,7 +318,7 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk, else return NULL; - net = dev_net(dst->dev); + net = dev_net_rcu(dst_dev(dst)); hash ^= net_hash_mix(net); hash = hash_32(hash, tcp_metrics_hash_log); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 6d5387811c32a..5e37dc45639db 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2217,7 +2217,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, u32 max_segs) { const struct inet_connection_sock *icsk = inet_csk(sk); - u32 send_win, cong_win, limit, in_flight; + u32 send_win, cong_win, limit, in_flight, threshold; + u64 srtt_in_ns, expected_ack, how_far_is_the_ack; struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *head; int win_divisor; @@ -2279,9 +2280,19 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, head = tcp_rtx_queue_head(sk); if (!head) goto send_now; - delta = tp->tcp_clock_cache - head->tstamp; - /* If next ACK is likely to come too late (half srtt), do not defer */ - if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0) + + srtt_in_ns = (u64)(NSEC_PER_USEC >> 3) * tp->srtt_us; + /* When is the ACK expected ? */ + expected_ack = head->tstamp + srtt_in_ns; + /* How far from now is the ACK expected ? */ + how_far_is_the_ack = expected_ack - tp->tcp_clock_cache; + + /* If next ACK is likely to come too late, + * ie in more than min(1ms, half srtt), do not defer. + */ + threshold = min(srtt_in_ns >> 1, NSEC_PER_MSEC); + + if ((s64)(how_far_is_the_ack - threshold) > 0) goto send_now; /* Ok, it looks like it is advisable to defer. 
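The tcp_output.c hunk above replaces the old "half srtt" test with an explicit deadline: TSO deferral is only worthwhile if the next ACK is expected within min(srtt/2, 1ms). A minimal user-space sketch of that arithmetic, assuming the kernel's encoding of tp->srtt_us (microseconds left-shifted by 3) and nanosecond timestamps; the function name is illustrative, not kernel code:

    #include <stdbool.h>
    #include <stdint.h>

    #define NSEC_PER_USEC 1000ULL
    #define NSEC_PER_MSEC 1000000ULL

    /* Returns true when deferring is still safe: the ACK that would
     * unblock more data is expected within min(srtt/2, 1ms). */
    static bool ack_expected_soon(uint64_t head_tstamp_ns, uint64_t now_ns,
                                  uint32_t srtt_us_shifted3)
    {
            /* srtt_us is stored <<3, so ns = us * 1000 / 8 = us * 125 */
            uint64_t srtt_ns = (NSEC_PER_USEC >> 3) * srtt_us_shifted3;
            uint64_t expected_ack = head_tstamp_ns + srtt_ns;
            uint64_t how_far = expected_ack - now_ns;   /* may wrap */
            uint64_t half_srtt = srtt_ns >> 1;
            uint64_t threshold = half_srtt < NSEC_PER_MSEC ? half_srtt
                                                           : NSEC_PER_MSEC;

            /* Signed comparison, as in the hunk, tolerates a deadline
             * that is already in the past. */
            return (int64_t)(how_far - threshold) <= 0;
    }

As in the patch, a false result maps to the send_now path.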
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 3cff51ba72bb0..0ae67d537499a 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -31,7 +31,7 @@ static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb) int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb) { return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, - net, sk, skb, skb->dev, skb_dst(skb)->dev, + net, sk, skb, skb->dev, skb_dst_dev(skb), __xfrm4_output, !(IPCB(skb)->flags & IPSKB_REROUTED)); } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 5350c9bb2319b..b72ca10349068 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1257,8 +1257,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, */ max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr) + dst->header_len + t->hlen; - if (max_headroom > READ_ONCE(dev->needed_headroom)) - WRITE_ONCE(dev->needed_headroom, max_headroom); + ip_tunnel_adj_headroom(dev, max_headroom); err = ip6_tnl_encap(skb, t, &proto, fl6); if (err) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 882ce5444572e..06edfe0b2db90 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -376,7 +376,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, { const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); - struct net *net = dev_net(skb->dev); + struct net *net = dev_net_rcu(skb->dev); struct request_sock *fastopen; struct ipv6_pinfo *np; struct tcp_sock *tp; @@ -864,16 +864,16 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 int oif, int rst, u8 tclass, __be32 label, u32 priority, u32 txhash, struct tcp_key *key) { - const struct tcphdr *th = tcp_hdr(skb); - struct tcphdr *t1; - struct sk_buff *buff; - struct flowi6 fl6; - struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); - struct sock *ctl_sk = net->ipv6.tcp_sk; + struct net *net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev); unsigned int tot_len = sizeof(struct tcphdr); + struct sock *ctl_sk = net->ipv6.tcp_sk; + const struct tcphdr *th = tcp_hdr(skb); __be32 mrst = 0, *topt; struct dst_entry *dst; - __u32 mark = 0; + struct sk_buff *buff; + struct tcphdr *t1; + struct flowi6 fl6; + u32 mark = 0; if (tsecr) tot_len += TCPOLEN_TSTAMP_ALIGNED; @@ -1036,7 +1036,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb, if (!sk && !ipv6_unicast_destination(skb)) return; - net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); + net = sk ? 
sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev); /* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(th, &md5_hash_location, &aoh)) return; @@ -1739,6 +1739,7 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) { + struct net *net = dev_net_rcu(skb->dev); enum skb_drop_reason drop_reason; int sdif = inet6_sdif(skb); int dif = inet6_iif(skb); @@ -1748,7 +1749,6 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) bool refcounted; int ret; u32 isn; - struct net *net = dev_net(skb->dev); drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (skb->pkt_type != PACKET_HOST) @@ -1999,7 +1999,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) void tcp_v6_early_demux(struct sk_buff *skb) { - struct net *net = dev_net(skb->dev); + struct net *net = dev_net_rcu(skb->dev); const struct ipv6hdr *hdr; const struct tcphdr *th; struct sock *sk; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 8c0d91dfd7e2b..538c6eea645f2 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -5280,12 +5280,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, } rx.sdata = prev_sta->sdata; + if (!status->link_valid && prev_sta->sta.mlo) { + struct link_sta_info *link_sta; + + link_sta = link_sta_info_get_bss(rx.sdata, + hdr->addr2); + if (!link_sta) + continue; + + link_id = link_sta->link_id; + } + if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) goto out; - if (!status->link_valid && prev_sta->sta.mlo) - continue; - ieee80211_prepare_and_rx_handle(&rx, skb, false); prev_sta = sta; @@ -5293,10 +5301,18 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, if (prev_sta) { rx.sdata = prev_sta->sdata; - if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) - goto out; + if (!status->link_valid && prev_sta->sta.mlo) { + struct link_sta_info *link_sta; + + link_sta = link_sta_info_get_bss(rx.sdata, + hdr->addr2); + if (!link_sta) + goto out; - if (!status->link_valid && prev_sta->sta.mlo) + link_id = link_sta->link_id; + } + + if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) goto out; if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c index dd595d9b5e50c..0d60556cfefab 100644 --- a/net/mptcp/ctrl.c +++ b/net/mptcp/ctrl.c @@ -381,10 +381,15 @@ void mptcp_active_enable(struct sock *sk) struct mptcp_pernet *pernet = mptcp_get_pernet(sock_net(sk)); if (atomic_read(&pernet->active_disable_times)) { - struct dst_entry *dst = sk_dst_get(sk); + struct net_device *dev; + struct dst_entry *dst; - if (dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)) + rcu_read_lock(); + dst = __sk_dst_get(sk); + dev = dst ? 
dst_dev_rcu(dst) : NULL;
+		if (!(dev && (dev->flags & IFF_LOOPBACK)))
+			atomic_set(&pernet->active_disable_times, 0);
+
+		rcu_read_unlock();
 	}
 }
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 2c8815daf5b04..1b7541206a70b 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -226,9 +226,12 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
 		} else {
 			__MPTCP_INC_STATS(sock_net((struct sock *)msk),
 					  MPTCP_MIB_ADDADDRDROP);
 		}
-	/* id0 should not have a different address */
+	/* - id0 should not have a different address
+	 * - special case for C-flag: linked to fill_local_addresses_vec()
+	 */
 	} else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
-		   (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
+		   (addr->id > 0 && !READ_ONCE(pm->accept_addr) &&
+		    !mptcp_pm_add_addr_c_flag_case(msk))) {
 		mptcp_pm_announce_addr(msk, addr, true);
 		mptcp_pm_add_addr_send_ack(msk);
 	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 463c2e7956d52..4d9a5c8f3b2f0 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -618,6 +618,10 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
 	}
 
 subflow:
+	/* No need to try establishing subflows to remote id0 if not allowed */
+	if (mptcp_pm_add_addr_c_flag_case(msk))
+		goto exit;
+
 	/* check if should create a new subflow */
 	while (msk->pm.local_addr_used < local_addr_max &&
 	       msk->pm.subflows < subflows_max) {
@@ -649,6 +653,8 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
 		__mptcp_subflow_connect(sk, &local, &addrs[i]);
 		spin_lock_bh(&msk->pm.lock);
 	}
+
+exit:
 	mptcp_pm_nl_check_work_pending(msk);
 }
@@ -674,10 +680,12 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
 	struct mptcp_addr_info mpc_addr;
 	struct pm_nl_pernet *pernet;
 	unsigned int subflows_max;
+	bool c_flag_case;
 	int i = 0;
 
 	pernet = pm_nl_get_pernet_from_msk(msk);
 	subflows_max = mptcp_pm_get_subflows_max(msk);
+	c_flag_case = remote->id && mptcp_pm_add_addr_c_flag_case(msk);
 
 	mptcp_local_address((struct sock_common *)msk, &mpc_addr);
@@ -690,12 +698,27 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
 			continue;
 
 		if (msk->pm.subflows < subflows_max) {
+			bool is_id0;
+
 			locals[i].addr = entry->addr;
 			locals[i].flags = entry->flags;
 			locals[i].ifindex = entry->ifindex;
 
+			is_id0 = mptcp_addresses_equal(&locals[i].addr,
+						       &mpc_addr,
+						       locals[i].addr.port);
+
+			if (c_flag_case &&
+			    (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) {
+				__clear_bit(locals[i].addr.id,
+					    msk->pm.id_avail_bitmap);
+
+				if (!is_id0)
+					msk->pm.local_addr_used++;
+			}
+
 			/* Special case for ID0: set the correct ID */
-			if (mptcp_addresses_equal(&locals[i].addr, &mpc_addr, locals[i].addr.port))
+			if (is_id0)
 				locals[i].addr.id = 0;
 
 			msk->pm.subflows++;
@@ -704,6 +727,37 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
 	}
 	rcu_read_unlock();
 
+	/* Special case: peer sets the C flag, accept one ADD_ADDR if default
+	 * limits are used -- accepting no ADD_ADDR -- and use subflow endpoints
+	 */
+	if (!i && c_flag_case) {
+		unsigned int local_addr_max = mptcp_pm_get_local_addr_max(msk);
+
+		while (msk->pm.local_addr_used < local_addr_max &&
+		       msk->pm.subflows < subflows_max) {
+			struct mptcp_pm_local *local = &locals[i];
+
+			if (!select_local_address(pernet, msk, local))
+				break;
+
+			__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+
+			if (!mptcp_pm_addr_families_match(sk, &local->addr,
+							  remote))
+				continue;
+
+			if (mptcp_addresses_equal(&local->addr, &mpc_addr,
+						  local->addr.port))
+				continue;
+
+			msk->pm.local_addr_used++;
+			msk->pm.subflows++;
+			i++;
+		}
+
+		return i;
+	}
+
 	/* If the array is empty, fill in the single
 	 * 'IPADDRANY' local address
 	 */
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 6f191b1259788..9653fee227ab2 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -1172,6 +1172,14 @@ static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
 	spin_unlock_bh(&msk->pm.lock);
 }
 
+static inline bool mptcp_pm_add_addr_c_flag_case(struct mptcp_sock *msk)
+{
+	return READ_ONCE(msk->pm.remote_deny_join_id0) &&
+	       msk->pm.local_addr_used == 0 &&
+	       mptcp_pm_get_add_addr_accept_max(msk) == 0 &&
+	       msk->pm.subflows < mptcp_pm_get_subflows_max(msk);
+}
+
 void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
 
 static inline struct mptcp_ext *mptcp_get_ext(const struct sk_buff *skb)
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 5251524b96afa..5e4453e9ef8e7 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -63,7 +63,7 @@ struct hbucket {
 	 : jhash_size((htable_bits) - HTABLE_REGION_BITS))
 #define ahash_sizeof_regions(htable_bits)		\
 	(ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
-#define ahash_region(n, htable_bits)	\
+#define ahash_region(n)			\
 	((n) / jhash_size(HTABLE_REGION_BITS))
 #define ahash_bucket_start(h, htable_bits)	\
 	((htable_bits) < HTABLE_REGION_BITS ? 0	\
@@ -702,7 +702,7 @@ mtype_resize(struct ip_set *set, bool retried)
 #endif
 		key = HKEY(data, h->initval, htable_bits);
 		m = __ipset_dereference(hbucket(t, key));
-		nr = ahash_region(key, htable_bits);
+		nr = ahash_region(key);
 		if (!m) {
 			m = kzalloc(sizeof(*m) +
 				    AHASH_INIT_SIZE * dsize,
@@ -852,7 +852,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 	rcu_read_lock_bh();
 	t = rcu_dereference_bh(h->table);
 	key = HKEY(value, h->initval, t->htable_bits);
-	r = ahash_region(key, t->htable_bits);
+	r = ahash_region(key);
 	atomic_inc(&t->uref);
 	elements = t->hregion[r].elements;
 	maxelem = t->maxelem;
@@ -1050,7 +1050,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 	rcu_read_lock_bh();
 	t = rcu_dereference_bh(h->table);
 	key = HKEY(value, h->initval, t->htable_bits);
-	r = ahash_region(key, t->htable_bits);
+	r = ahash_region(key);
 	atomic_inc(&t->uref);
 	rcu_read_unlock_bh();
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index c0289f83f96df..327baa17882a8 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -885,7 +885,7 @@ static void ip_vs_conn_expire(struct timer_list *t)
 	 * conntrack cleanup for the net.
 	 */
 	smp_rmb();
-	if (ipvs->enable)
+	if (READ_ONCE(ipvs->enable))
 		ip_vs_conn_drop_conntrack(cp);
 }
@@ -1433,7 +1433,7 @@ void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs)
 			cond_resched_rcu();
 
 			/* netns clean up started, abort delayed work */
-			if (!ipvs->enable)
+			if (!READ_ONCE(ipvs->enable))
 				break;
 		}
 		rcu_read_unlock();
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index c7a8a08b73089..5ea7ab8bf4dcc 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1353,9 +1353,6 @@ ip_vs_out_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *stat
 	if (unlikely(!skb_dst(skb)))
 		return NF_ACCEPT;
 
-	if (!ipvs->enable)
-		return NF_ACCEPT;
-
 	ip_vs_fill_iph_skb(af, skb, false, &iph);
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
@@ -1940,7 +1937,7 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
 		return NF_ACCEPT;
 	}
 	/* ipvs enabled in this netns ? */
-	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
+	if (unlikely(sysctl_backup_only(ipvs)))
 		return NF_ACCEPT;
 
 	ip_vs_fill_iph_skb(af, skb, false, &iph);
@@ -2108,7 +2105,7 @@ ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
 	int r;
 
 	/* ipvs enabled in this netns ? */
-	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
+	if (unlikely(sysctl_backup_only(ipvs)))
 		return NF_ACCEPT;
 
 	if (state->pf == NFPROTO_IPV4) {
@@ -2295,7 +2292,7 @@ static int __net_init __ip_vs_init(struct net *net)
 		return -ENOMEM;
 
 	/* Hold the beast until a service is registered */
-	ipvs->enable = 0;
+	WRITE_ONCE(ipvs->enable, 0);
 	ipvs->net = net;
 	/* Counters used for creating unique names */
 	ipvs->gen = atomic_read(&ipvs_netns_cnt);
@@ -2367,7 +2364,7 @@ static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list)
 		ipvs = net_ipvs(net);
 		ip_vs_unregister_hooks(ipvs, AF_INET);
 		ip_vs_unregister_hooks(ipvs, AF_INET6);
-		ipvs->enable = 0;	/* Disable packet reception */
+		WRITE_ONCE(ipvs->enable, 0);	/* Disable packet reception */
 		smp_wmb();
 		ip_vs_sync_net_cleanup(ipvs);
 	}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 3224f6e17e736..3219338feca4d 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -256,7 +256,7 @@ static void est_reload_work_handler(struct work_struct *work)
 		struct ip_vs_est_kt_data *kd = ipvs->est_kt_arr[id];
 
 		/* netns clean up started, abort delayed work */
-		if (!ipvs->enable)
+		if (!READ_ONCE(ipvs->enable))
 			goto unlock;
 		if (!kd)
 			continue;
@@ -1482,9 +1482,9 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
 
 	*svc_p = svc;
 
-	if (!ipvs->enable) {
+	if (!READ_ONCE(ipvs->enable)) {
 		/* Now there is a service - full throttle */
-		ipvs->enable = 1;
+		WRITE_ONCE(ipvs->enable, 1);
 
 		/* Start estimation for first time */
 		ip_vs_est_reload_start(ipvs);
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index f821ad2e19b35..3492108bb3b97 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -231,7 +231,7 @@ static int ip_vs_estimation_kthread(void *data)
 void ip_vs_est_reload_start(struct netns_ipvs *ipvs)
 {
 	/* Ignore reloads before first service is added */
-	if (!ipvs->enable)
+	if (!READ_ONCE(ipvs->enable))
 		return;
 	ip_vs_est_stopped_recalc(ipvs);
 	/* Bump the kthread configuration genid */
@@ -305,7 +305,7 @@ static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
 	int i;
 
 	if ((unsigned long)ipvs->est_kt_count >= ipvs->est_max_threads &&
-	    ipvs->enable && ipvs->est_max_threads)
+	    READ_ONCE(ipvs->enable) && ipvs->est_max_threads)
 		return -EINVAL;
 
 	mutex_lock(&ipvs->est_mutex);
@@ -342,7 +342,7 @@ static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
 	}
 
 	/* Start kthread tasks only when services are present */
-	if (ipvs->enable && !ip_vs_est_stopped(ipvs)) {
+	if (READ_ONCE(ipvs->enable) && !ip_vs_est_stopped(ipvs)) {
 		ret = ip_vs_est_kthread_start(ipvs, kd);
 		if (ret < 0)
 			goto out;
@@ -485,7 +485,7 @@ int ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
 	struct ip_vs_estimator *est = &stats->est;
 	int ret;
 
-	if (!ipvs->est_max_threads && ipvs->enable)
+	if (!ipvs->est_max_threads && READ_ONCE(ipvs->enable))
 		ipvs->est_max_threads = ip_vs_est_max_threads(ipvs);
 
 	est->ktid = -1;
@@ -662,7 +662,7 @@ static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
 			/* Wait for cpufreq frequency transition */
 			wait_event_idle_timeout(wq, kthread_should_stop(),
 						HZ / 50);
-			if (!ipvs->enable || kthread_should_stop())
+			if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
 				goto stop;
 		}
@@ -680,7 +680,7 @@ static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
 		rcu_read_unlock();
 		local_bh_enable();
 
-		if (!ipvs->enable || kthread_should_stop())
+		if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
 			goto stop;
 		cond_resched();
@@ -756,7 +756,7 @@ static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs)
 	mutex_lock(&ipvs->est_mutex);
 	for (id = 1; id < ipvs->est_kt_count; id++) {
 		/* netns clean up started, abort */
-		if (!ipvs->enable)
+		if (!READ_ONCE(ipvs->enable))
 			goto unlock2;
 		kd = ipvs->est_kt_arr[id];
 		if (!kd)
@@ -786,7 +786,7 @@ static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs)
 	id = ipvs->est_kt_count;
 
 next_kt:
-	if (!ipvs->enable || kthread_should_stop())
+	if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
 		goto unlock;
 	id--;
 	if (id < 0)
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index d8a284999544b..206c6700e2006 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -53,6 +53,7 @@ enum {
 	IP_VS_FTP_EPSV,
 };
 
+static bool exiting_module;
 /*
 * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
 * First port is set to the default port.
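The ip_vs hunks above convert every lockless access of ipvs->enable to READ_ONCE()/WRITE_ONCE(). A minimal sketch of the pattern, using a hypothetical flag and helpers that are not taken from the patch:

	/* One writer flips the flag; readers poll it without a lock.
	 * The _ONCE annotations force a single, untorn load or store and
	 * stop the compiler from caching or re-reading the value.
	 */
	static int enabled;

	static void set_enabled(int on)
	{
		WRITE_ONCE(enabled, on);	/* hypothetical writer side */
	}

	static bool is_enabled(void)
	{
		return READ_ONCE(enabled);	/* fresh load on every call */
	}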
@@ -605,7 +606,7 @@ static void __ip_vs_ftp_exit(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	if (!ipvs)
+	if (!ipvs || !exiting_module)
 		return;
 
 	unregister_ip_vs_app(ipvs, &ip_vs_ftp);
@@ -627,6 +628,7 @@ static int __init ip_vs_ftp_init(void)
 */
 static void __exit ip_vs_ftp_exit(void)
 {
+	exiting_module = true;
 	unregister_pernet_subsys(&ip_vs_ftp_ops);
 	/* rcu_barrier() is called by netns */
 }
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 7784ec094097b..f12d0d229aaa5 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -376,6 +376,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 	const struct nfnetlink_subsystem *ss;
 	const struct nfnl_callback *nc;
 	struct netlink_ext_ack extack;
+	struct nlmsghdr *onlh = nlh;
 	LIST_HEAD(err_list);
 	u32 status;
 	int err;
@@ -386,6 +387,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 	status = 0;
 replay_abort:
 	skb = netlink_skb_clone(oskb, GFP_KERNEL);
+	nlh = onlh;
 	if (!skb)
 		return netlink_ack(oskb, nlh, -ENOMEM, NULL);
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 8ee66a86c3bc7..1a62e384766a7 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -22,6 +22,35 @@ void nft_objref_eval(const struct nft_expr *expr,
 	obj->ops->eval(obj, regs, pkt);
 }
 
+static int nft_objref_validate_obj_type(const struct nft_ctx *ctx, u32 type)
+{
+	unsigned int hooks;
+
+	switch (type) {
+	case NFT_OBJECT_SYNPROXY:
+		if (ctx->family != NFPROTO_IPV4 &&
+		    ctx->family != NFPROTO_IPV6 &&
+		    ctx->family != NFPROTO_INET)
+			return -EOPNOTSUPP;
+
+		hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD);
+
+		return nft_chain_validate_hooks(ctx->chain, hooks);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int nft_objref_validate(const struct nft_ctx *ctx,
+			       const struct nft_expr *expr)
+{
+	struct nft_object *obj = nft_objref_priv(expr);
+
+	return nft_objref_validate_obj_type(ctx, obj->ops->type->type);
+}
+
 static int nft_objref_init(const struct nft_ctx *ctx,
 			   const struct nft_expr *expr,
 			   const struct nlattr * const tb[])
@@ -93,6 +122,7 @@ static const struct nft_expr_ops nft_objref_ops = {
 	.activate	= nft_objref_activate,
 	.deactivate	= nft_objref_deactivate,
 	.dump		= nft_objref_dump,
+	.validate	= nft_objref_validate,
 	.reduce		= NFT_REDUCE_READONLY,
 };
 
@@ -197,6 +227,14 @@ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
 	nf_tables_destroy_set(ctx, priv->set);
 }
 
+static int nft_objref_map_validate(const struct nft_ctx *ctx,
+				   const struct nft_expr *expr)
+{
+	const struct nft_objref_map *priv = nft_expr_priv(expr);
+
+	return nft_objref_validate_obj_type(ctx, priv->set->objtype);
+}
+
 static const struct nft_expr_ops nft_objref_map_ops = {
 	.type		= &nft_objref_type,
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
@@ -206,6 +244,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
 	.deactivate	= nft_objref_map_deactivate,
 	.destroy	= nft_objref_map_destroy,
 	.dump		= nft_objref_map_dump,
+	.validate	= nft_objref_map_validate,
 	.reduce		= NFT_REDUCE_READONLY,
 };
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 994a0a1efb589..cb2a672105dc1 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -27,11 +27,16 @@
 
 /* Handle NCI Notification packets */
 
-static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
-				      const struct sk_buff *skb)
+static int nci_core_reset_ntf_packet(struct nci_dev *ndev,
+				     const struct sk_buff *skb)
 {
 	/* Handle NCI 2.x core reset notification */
-	const struct nci_core_reset_ntf *ntf = (void *)skb->data;
+	const struct nci_core_reset_ntf *ntf;
+
+	if (skb->len < sizeof(struct nci_core_reset_ntf))
+		return -EINVAL;
+
+	ntf = (struct nci_core_reset_ntf *)skb->data;
 
 	ndev->nci_ver = ntf->nci_ver;
 	pr_debug("nci_ver 0x%x, config_status 0x%x\n",
@@ -42,15 +47,22 @@ static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
 		__le32_to_cpu(ntf->manufact_specific_info);
 
 	nci_req_complete(ndev, NCI_STATUS_OK);
+
+	return 0;
 }
 
-static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
-					     struct sk_buff *skb)
+static int nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
+					    struct sk_buff *skb)
 {
-	struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
+	struct nci_core_conn_credit_ntf *ntf;
 	struct nci_conn_info *conn_info;
 	int i;
 
+	if (skb->len < sizeof(struct nci_core_conn_credit_ntf))
+		return -EINVAL;
+
+	ntf = (struct nci_core_conn_credit_ntf *)skb->data;
+
 	pr_debug("num_entries %d\n", ntf->num_entries);
 
 	if (ntf->num_entries > NCI_MAX_NUM_CONN)
@@ -68,7 +80,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
 		conn_info = nci_get_conn_info_by_conn_id(ndev,
 							 ntf->conn_entries[i].conn_id);
 		if (!conn_info)
-			return;
+			return 0;
 
 		atomic_add(ntf->conn_entries[i].credits,
 			   &conn_info->credits_cnt);
@@ -77,12 +89,19 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
 	/* trigger the next tx */
 	if (!skb_queue_empty(&ndev->tx_q))
 		queue_work(ndev->tx_wq, &ndev->tx_work);
+
+	return 0;
 }
 
-static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
-					      const struct sk_buff *skb)
+static int nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
+					     const struct sk_buff *skb)
 {
-	__u8 status = skb->data[0];
+	__u8 status;
+
+	if (skb->len < 1)
+		return -EINVAL;
+
+	status = skb->data[0];
 
 	pr_debug("status 0x%x\n", status);
 
@@ -91,12 +110,19 @@ static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
 		   (the state remains the same) */
 		nci_req_complete(ndev, status);
 	}
+
+	return 0;
 }
 
-static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
-						struct sk_buff *skb)
+static int nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
+					       struct sk_buff *skb)
 {
-	struct nci_core_intf_error_ntf *ntf = (void *) skb->data;
+	struct nci_core_intf_error_ntf *ntf;
+
+	if (skb->len < sizeof(struct nci_core_intf_error_ntf))
+		return -EINVAL;
+
+	ntf = (struct nci_core_intf_error_ntf *)skb->data;
 
 	ntf->conn_id = nci_conn_id(&ntf->conn_id);
 
@@ -105,6 +131,8 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
 	/* complete the data exchange transaction, if exists */
 	if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
 		nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO);
+
+	return 0;
 }
 
 static const __u8 *
@@ -329,13 +357,18 @@ void nci_clear_target_list(struct nci_dev *ndev)
 	ndev->n_targets = 0;
 }
 
-static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
-				       const struct sk_buff *skb)
+static int nci_rf_discover_ntf_packet(struct nci_dev *ndev,
+				      const struct sk_buff *skb)
 {
 	struct nci_rf_discover_ntf ntf;
-	const __u8 *data = skb->data;
+	const __u8 *data;
 	bool add_target = true;
 
+	if (skb->len < sizeof(struct nci_rf_discover_ntf))
+		return -EINVAL;
+
+	data = skb->data;
+
 	ntf.rf_discovery_id = *data++;
 	ntf.rf_protocol = *data++;
 	ntf.rf_tech_and_mode = *data++;
@@ -390,6 +423,8 @@ static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
 		nfc_targets_found(ndev->nfc_dev, ndev->targets,
 				  ndev->n_targets);
 	}
+
+	return 0;
 }
 
 static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
@@ -531,14 +566,19 @@ static int nci_store_general_bytes_nfc_dep(struct nci_dev *ndev,
 	return NCI_STATUS_OK;
 }
 
-static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
-					     const struct sk_buff *skb)
+static int nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+					    const struct sk_buff *skb)
 {
 	struct nci_conn_info *conn_info;
 	struct nci_rf_intf_activated_ntf ntf;
-	const __u8 *data = skb->data;
+	const __u8 *data;
 	int err = NCI_STATUS_OK;
 
+	if (skb->len < sizeof(struct nci_rf_intf_activated_ntf))
+		return -EINVAL;
+
+	data = skb->data;
+
 	ntf.rf_discovery_id = *data++;
 	ntf.rf_interface = *data++;
 	ntf.rf_protocol = *data++;
@@ -645,7 +685,7 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
 	if (err == NCI_STATUS_OK) {
 		conn_info = ndev->rf_conn_info;
 		if (!conn_info)
-			return;
+			return 0;
 
 		conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size;
 		conn_info->initial_num_credits = ntf.initial_num_credits;
@@ -691,19 +731,26 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
 				pr_err("error when signaling tm activation\n");
 		}
 	}
+
+	return 0;
 }
 
-static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
-					 const struct sk_buff *skb)
+static int nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
+					const struct sk_buff *skb)
 {
 	const struct nci_conn_info *conn_info;
-	const struct nci_rf_deactivate_ntf *ntf = (void *)skb->data;
+	const struct nci_rf_deactivate_ntf *ntf;
+
+	if (skb->len < sizeof(struct nci_rf_deactivate_ntf))
+		return -EINVAL;
+
+	ntf = (struct nci_rf_deactivate_ntf *)skb->data;
 
 	pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
 
 	conn_info = ndev->rf_conn_info;
 	if (!conn_info)
-		return;
+		return 0;
 
 	/* drop tx data queue */
 	skb_queue_purge(&ndev->tx_q);
@@ -735,14 +782,20 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
 	}
 
 	nci_req_complete(ndev, NCI_STATUS_OK);
+
+	return 0;
 }
 
-static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
-					  const struct sk_buff *skb)
+static int nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
+					 const struct sk_buff *skb)
 {
 	u8 status = NCI_STATUS_OK;
-	const struct nci_nfcee_discover_ntf *nfcee_ntf =
-				(struct nci_nfcee_discover_ntf *)skb->data;
+	const struct nci_nfcee_discover_ntf *nfcee_ntf;
+
+	if (skb->len < sizeof(struct nci_nfcee_discover_ntf))
+		return -EINVAL;
+
+	nfcee_ntf = (struct nci_nfcee_discover_ntf *)skb->data;
 
 	/* NFCForum NCI 9.2.1 HCI Network Specific Handling
 	 * If the NFCC supports the HCI Network, it SHALL return one,
@@ -753,6 +806,8 @@ static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
 			ndev->cur_params.id = nfcee_ntf->nfcee_id;
 
 	nci_req_complete(ndev, status);
+
+	return 0;
 }
 
 void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
@@ -779,35 +834,43 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
 	switch (ntf_opcode) {
 	case NCI_OP_CORE_RESET_NTF:
-		nci_core_reset_ntf_packet(ndev, skb);
+		if (nci_core_reset_ntf_packet(ndev, skb))
+			goto end;
 		break;
 	case NCI_OP_CORE_CONN_CREDITS_NTF:
-		nci_core_conn_credits_ntf_packet(ndev, skb);
+		if (nci_core_conn_credits_ntf_packet(ndev, skb))
+			goto end;
 		break;
 	case NCI_OP_CORE_GENERIC_ERROR_NTF:
-		nci_core_generic_error_ntf_packet(ndev, skb);
+		if (nci_core_generic_error_ntf_packet(ndev, skb))
+			goto end;
 		break;
 	case NCI_OP_CORE_INTF_ERROR_NTF:
-		nci_core_conn_intf_error_ntf_packet(ndev, skb);
+		if (nci_core_conn_intf_error_ntf_packet(ndev, skb))
+			goto end;
 		break;
 	case NCI_OP_RF_DISCOVER_NTF:
-		nci_rf_discover_ntf_packet(ndev, skb);
+		if (nci_rf_discover_ntf_packet(ndev, skb))
+			goto end;
 		break;
 	case NCI_OP_RF_INTF_ACTIVATED_NTF:
-		nci_rf_intf_activated_ntf_packet(ndev, skb);
+		if (nci_rf_intf_activated_ntf_packet(ndev, skb))
+			goto end;
 		break;
 	case NCI_OP_RF_DEACTIVATE_NTF:
-		nci_rf_deactivate_ntf_packet(ndev, skb);
+		if (nci_rf_deactivate_ntf_packet(ndev, skb))
+			goto end;
 		break;
 	case NCI_OP_NFCEE_DISCOVER_NTF:
-		nci_nfcee_discover_ntf_packet(ndev, skb);
+		if (nci_nfcee_discover_ntf_packet(ndev, skb))
+			goto end;
 		break;
 	case NCI_OP_RF_NFCEE_ACTION_NTF:
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c56a01992cb28..709d851d0b1e8 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -599,16 +599,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 	qdisc_skb_cb(skb)->pkt_len = pkt_len;
 }
 
-void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
-{
-	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
-		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
-			txt, qdisc->ops->id, qdisc->handle >> 16);
-		qdisc->flags |= TCQ_F_WARN_NONWC;
-	}
-}
-EXPORT_SYMBOL(qdisc_warn_nonwc);
-
 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5a7745170e84b..d8fd35da32a7c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -835,22 +835,6 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
 	}
 }
 
-static unsigned int
-qdisc_peek_len(struct Qdisc *sch)
-{
-	struct sk_buff *skb;
-	unsigned int len;
-
-	skb = sch->ops->peek(sch);
-	if (unlikely(skb == NULL)) {
-		qdisc_warn_nonwc("qdisc_peek_len", sch);
-		return 0;
-	}
-	len = qdisc_pkt_len(skb);
-
-	return len;
-}
-
 static void
 hfsc_adjust_levels(struct hfsc_class *cl)
 {
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 5a345ef35c9f6..5b43578493ef1 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1002,7 +1002,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
 	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
 		list_del_init(&cl->alist);
-	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
+	else if (cl->deficit < qdisc_peek_len(cl->qdisc)) {
 		cl->deficit += agg->lmax;
 		list_move_tail(&cl->alist, &agg->active);
 	}
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 5c16521818058..f5a7d5a387555 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -169,13 +169,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
 		chunk->head_skb = chunk->skb;
 
 		/* skbs with "cover letter" */
-		if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
+		if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len) {
+			if (WARN_ON(!skb_shinfo(chunk->skb)->frag_list)) {
+				__SCTP_INC_STATS(dev_net(chunk->skb->dev),
+						 SCTP_MIB_IN_PKT_DISCARDS);
+				sctp_chunk_free(chunk);
+				goto next_chunk;
+			}
 			chunk->skb = skb_shinfo(chunk->skb)->frag_list;
-
-		if (WARN_ON(!chunk->skb)) {
-			__SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
-			sctp_chunk_free(chunk);
-			goto next_chunk;
 		}
 	}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f80208edd6a5c..96ca400120e60 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -31,6 +31,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include
+#include <crypto/utils.h>
 #include
 #include
 #include
@@ -1796,7 +1797,7 @@ struct sctp_association *sctp_unpack_cookie(
 		}
 	}
 
-	if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
+	if (crypto_memneq(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
 		*error = -SCTP_IERROR_BAD_SIG;
 		goto fail;
 	}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a0524ba8d7878..dc66dff33d6d4 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -30,6 +30,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <crypto/utils.h>
 #include
 #include
 #include
@@ -885,7 +886,8 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
 	return SCTP_DISPOSITION_CONSUME;
 
 nomem_authev:
-	sctp_ulpevent_free(ai_ev);
+	if (ai_ev)
+		sctp_ulpevent_free(ai_ev);
 nomem_aiev:
 	sctp_ulpevent_free(ev);
 nomem_ev:
@@ -4416,7 +4418,7 @@ static enum sctp_ierror sctp_sf_authenticate(
 				 sh_key, GFP_ATOMIC);
 
 	/* Discard the packet if the digests do not match */
-	if (memcmp(save_digest, digest, sig_len)) {
+	if (crypto_memneq(save_digest, digest, sig_len)) {
 		kfree(save_digest);
 		return SCTP_IERROR_BAD_SIG;
 	}
diff --git a/net/smc/smc_inet.c b/net/smc/smc_inet.c
index a944e7dcb8b96..a94084b4a498e 100644
--- a/net/smc/smc_inet.c
+++ b/net/smc/smc_inet.c
@@ -56,7 +56,6 @@ static struct inet_protosw smc_inet_protosw = {
 	.protocol	= IPPROTO_SMC,
 	.prot		= &smc_inet_prot,
 	.ops		= &smc_inet_stream_ops,
-	.flags		= INET_PROTOSW_ICSK,
 };
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -104,27 +103,15 @@ static struct inet_protosw smc_inet6_protosw = {
 	.protocol	= IPPROTO_SMC,
 	.prot		= &smc_inet6_prot,
 	.ops		= &smc_inet6_stream_ops,
-	.flags		= INET_PROTOSW_ICSK,
 };
 #endif /* CONFIG_IPV6 */
 
-static unsigned int smc_sync_mss(struct sock *sk, u32 pmtu)
-{
-	/* No need pass it through to clcsock, mss can always be set by
-	 * sock_create_kern or smc_setsockopt.
-	 */
-	return 0;
-}
-
 static int smc_inet_init_sock(struct sock *sk)
 {
 	struct net *net = sock_net(sk);
 
 	/* init common smc sock */
 	smc_sk_init(net, sk, IPPROTO_SMC);
-
-	inet_csk(sk)->icsk_sync_mss = smc_sync_mss;
-
 	/* create clcsock */
 	return smc_create_clcsk(net, sk, sk->sk_family);
 }
diff --git a/net/smc/smc_loopback.c b/net/smc/smc_loopback.c
index 3c5f64ca41153..85f0b7853b173 100644
--- a/net/smc/smc_loopback.c
+++ b/net/smc/smc_loopback.c
@@ -56,6 +56,7 @@ static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
 {
 	struct smc_lo_dmb_node *dmb_node, *tmp_node;
 	struct smc_lo_dev *ldev = smcd->priv;
+	struct folio *folio;
 	int sba_idx, rc;
 
 	/* check space for new dmb */
@@ -74,13 +75,16 @@ static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
 
 	dmb_node->sba_idx = sba_idx;
 	dmb_node->len = dmb->dmb_len;
-	dmb_node->cpu_addr = kzalloc(dmb_node->len, GFP_KERNEL |
-				     __GFP_NOWARN | __GFP_NORETRY |
-				     __GFP_NOMEMALLOC);
-	if (!dmb_node->cpu_addr) {
+
+	/* not critical; fail under memory pressure and fallback to TCP */
+	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+			    __GFP_NORETRY | __GFP_ZERO,
+			    get_order(dmb_node->len));
+	if (!folio) {
 		rc = -ENOMEM;
 		goto err_node;
 	}
+	dmb_node->cpu_addr = folio_address(folio);
 	dmb_node->dma_addr = SMC_DMA_ADDR_INVALID;
 	refcount_set(&dmb_node->refcnt, 1);
@@ -122,7 +126,7 @@ static void __smc_lo_unregister_dmb(struct smc_lo_dev *ldev,
 	write_unlock_bh(&ldev->dmb_ht_lock);
 
 	clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
-	kvfree(dmb_node->cpu_addr);
+	folio_put(virt_to_folio(dmb_node->cpu_addr));
 	kfree(dmb_node);
 
 	if (atomic_dec_and_test(&ldev->dmb_cnt))
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 73a90ad873fb9..2d5ac2b3d5269 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -724,7 +724,7 @@ svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
 		rqstp->rq_auth_stat = rpc_autherr_badverf;
 		return SVC_DENIED;
 	}
-	if (flavor != RPC_AUTH_GSS) {
+	if (flavor != RPC_AUTH_GSS || checksum.len < XDR_UNIT) {
 		rqstp->rq_auth_stat = rpc_autherr_badverf;
 		return SVC_DENIED;
 	}
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 43c57124de52f..67474470320cb 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -606,7 +606,8 @@ int svc_port_is_privileged(struct sockaddr *sin)
 }
 
 /*
- * Make sure that we don't have too many active connections. If we have,
+ * Make sure that we don't have too many connections that have not yet
+ * demonstrated that they have access to the NFS server. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
@@ -625,27 +626,25 @@ int svc_port_is_privileged(struct sockaddr *sin)
 */
 static void svc_check_conn_limits(struct svc_serv *serv)
 {
-	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
-				(serv->sv_nrthreads+3) * 20;
+	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn : 64;
 
 	if (serv->sv_tmpcnt > limit) {
-		struct svc_xprt *xprt = NULL;
+		struct svc_xprt *xprt = NULL, *xprti;
 		spin_lock_bh(&serv->sv_lock);
 		if (!list_empty(&serv->sv_tempsocks)) {
-			/* Try to help the admin */
-			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
-					       serv->sv_name, serv->sv_maxconn ?
-					       "max number of connections" :
-					       "number of threads");
 			/*
 			 * Always select the oldest connection. It's not fair,
-			 * but so is life
+			 * but nor is life.
 			 */
-			xprt = list_entry(serv->sv_tempsocks.prev,
-					  struct svc_xprt,
-					  xpt_list);
-			set_bit(XPT_CLOSE, &xprt->xpt_flags);
-			svc_xprt_get(xprt);
+			list_for_each_entry_reverse(xprti, &serv->sv_tempsocks,
+						    xpt_list) {
+				if (!test_bit(XPT_PEER_VALID, &xprti->xpt_flags)) {
+					xprt = xprti;
+					set_bit(XPT_CLOSE, &xprt->xpt_flags);
+					svc_xprt_get(xprt);
+					break;
+				}
+			}
 		}
 		spin_unlock_bh(&serv->sv_lock);
 
@@ -1029,6 +1028,19 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
 	struct svc_serv *serv = xprt->xpt_server;
 	struct svc_deferred_req *dr;
 
+	/* unregister with rpcbind when the transport type is TCP or UDP.
+	 */
+	if (test_bit(XPT_RPCB_UNREG, &xprt->xpt_flags)) {
+		struct svc_sock *svsk = container_of(xprt, struct svc_sock,
+						     sk_xprt);
+		struct socket *sock = svsk->sk_sock;
+
+		if (svc_register(serv, xprt->xpt_net, sock->sk->sk_family,
+				 sock->sk->sk_protocol, 0) < 0)
+			pr_warn("failed to unregister %s with rpcbind\n",
+				xprt->xpt_class->xcl_name);
+	}
+
 	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
 		return;
 
@@ -1039,7 +1051,8 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
 
 	spin_lock_bh(&serv->sv_lock);
 	list_del_init(&xprt->xpt_list);
-	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+	if (test_bit(XPT_TEMP, &xprt->xpt_flags) &&
+	    !test_bit(XPT_PEER_VALID, &xprt->xpt_flags))
 		serv->sv_tmpcnt--;
 	spin_unlock_bh(&serv->sv_lock);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index e61e945760582..443d8390ebf1c 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -837,6 +837,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
 	/* data might have come in before data_ready set up */
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
+	set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
 
 	/* make sure we get destination address info */
 	switch (svsk->sk_sk->sk_family) {
@@ -1357,6 +1358,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 	if (sk->sk_state == TCP_LISTEN) {
 		strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
 		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
+		set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
 		sk->sk_data_ready = svc_tcp_listen_data_ready;
 		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 	} else {
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 0acf313deb01f..e52e4c91c9091 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -255,12 +255,9 @@ int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
 		if (msg->msg_flags & MSG_MORE)
 			return -EINVAL;
 
-		rc = tls_handle_open_record(sk, msg->msg_flags);
-		if (rc)
-			return rc;
-
 		*record_type = *(unsigned char *)CMSG_DATA(cmsg);
-		rc = 0;
+
+		rc = tls_handle_open_record(sk, msg->msg_flags);
 		break;
 	default:
 		return -EINVAL;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index f46550b96061e..1ff0d01bdadf0 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1054,7 +1054,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 			if (ret == -EINPROGRESS)
 				num_async++;
 			else if (ret != -EAGAIN)
-				goto send_end;
+				goto end;
 		}
 	}
 
@@ -1112,8 +1112,11 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 				goto send_end;
 
 			tls_ctx->pending_open_record_frags = true;
-			if (sk_msg_full(msg_pl))
+			if (sk_msg_full(msg_pl)) {
 				full_record = true;
+				sk_msg_trim(sk, msg_en,
+					    msg_pl->sg.size + prot->overhead_size);
+			}
 
 			if (full_record || eor)
 				goto copied;
@@ -1149,6 +1152,13 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 				} else if (ret != -EAGAIN)
 					goto send_end;
 			}
+
+			/* Transmit if any encryptions have completed */
+			if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+				cancel_delayed_work(&ctx->tx_work.work);
+				tls_tx_records(sk, msg->msg_flags);
+			}
+
 			continue;
 rollback_iter:
 			copied -= try_to_copy;
@@ -1204,6 +1214,12 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 					goto send_end;
 				}
 			}
+
+			/* Transmit if any encryptions have completed */
+			if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+				cancel_delayed_work(&ctx->tx_work.work);
+				tls_tx_records(sk, msg->msg_flags);
+			}
 		}
 
 		continue;
@@ -1223,8 +1239,9 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 			goto alloc_encrypted;
 	}
 
+send_end:
 	if (!num_async) {
-		goto send_end;
+		goto end;
 	} else if (num_zc || eor) {
 		int err;
@@ -1242,7 +1259,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 		tls_tx_records(sk, msg->msg_flags);
 	}
 
-send_end:
+end:
 	ret = sk_stream_error(sk, msg->msg_flags, ret);
 	return copied > 0 ? copied : ret;
 }
@@ -1633,8 +1650,10 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
 
 	if (unlikely(darg->async)) {
 		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
-		if (err)
-			__skb_queue_tail(&ctx->async_hold, darg->skb);
+		if (err) {
+			err = tls_decrypt_async_wait(ctx);
+			darg->async = false;
+		}
 		return err;
 	}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ef519b55a3d9a..68a9d4214584f 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -487,12 +487,26 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
 		goto err;
 	}
 
-	if (vsk->transport) {
-		if (vsk->transport == new_transport) {
-			ret = 0;
-			goto err;
-		}
+	if (vsk->transport && vsk->transport == new_transport) {
+		ret = 0;
+		goto err;
+	}
 
+	/* We increase the module refcnt to prevent the transport unloading
+	 * while there are open sockets assigned to it.
+	 */
+	if (!new_transport || !try_module_get(new_transport->module)) {
+		ret = -ENODEV;
+		goto err;
+	}
+
+	/* It's safe to release the mutex after a successful try_module_get().
+	 * Whichever transport `new_transport` points at, it won't go away until
+	 * the last module_put() below or in vsock_deassign_transport().
+	 */
+	mutex_unlock(&vsock_register_mutex);
+
+	if (vsk->transport) {
 		/* transport->release() must be called with sock lock acquired.
 		 * This path can only be taken during vsock_connect(), where we
 		 * have already held the sock lock. In the other cases, this
@@ -512,20 +526,6 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
 		vsk->peer_shutdown = 0;
 	}
 
-	/* We increase the module refcnt to prevent the transport unloading
-	 * while there are open sockets assigned to it.
-	 */
-	if (!new_transport || !try_module_get(new_transport->module)) {
-		ret = -ENODEV;
-		goto err;
-	}
-
-	/* It's safe to release the mutex after a successful try_module_get().
-	 * Whichever transport `new_transport` points at, it won't go away until
-	 * the last module_put() below or in vsock_deassign_transport().
-	 */
-	mutex_unlock(&vsock_register_mutex);
-
 	if (sk->sk_type == SOCK_SEQPACKET) {
 		if (!new_transport->seqpacket_allow ||
 		    !new_transport->seqpacket_allow(remote_cid)) {
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f6846eb0f4b84..9f8428dc418a5 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -4234,6 +4234,8 @@ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
 	struct wireless_dev *wdev;
 	unsigned int link_id;
 
+	wiphy_lock(&rdev->wiphy);
+
 	/* If we finished CAC or received radar, we should end any
 	 * CAC running on the same channels.
 	 * the check !cfg80211_chandef_dfs_usable contain 2 options:
@@ -4258,6 +4260,8 @@ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
 				rdev_end_cac(rdev, wdev->netdev, link_id);
 		}
 	}
+
+	wiphy_unlock(&rdev->wiphy);
 }
 
 void regulatory_propagate_dfs_state(struct wiphy *wiphy,
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 406b20dfee8d4..9351e1298ef40 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -143,14 +143,24 @@ static inline bool xp_unused_options_set(u32 options)
 static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
 					    struct xdp_desc *desc)
 {
-	u64 addr = desc->addr - pool->tx_metadata_len;
-	u64 len = desc->len + pool->tx_metadata_len;
-	u64 offset = addr & (pool->chunk_size - 1);
+	u64 len = desc->len;
+	u64 addr, offset;
 
-	if (!desc->len)
+	if (!len)
 		return false;
 
-	if (offset + len > pool->chunk_size)
+	/* Can overflow if desc->addr < pool->tx_metadata_len */
+	if (check_sub_overflow(desc->addr, pool->tx_metadata_len, &addr))
+		return false;
+
+	offset = addr & (pool->chunk_size - 1);
+
+	/*
+	 * Can't overflow: @offset is guaranteed to be < ``U32_MAX``
+	 * (pool->chunk_size is ``u32``), @len is guaranteed
+	 * to be <= ``U32_MAX``.
+	 */
+	if (offset + len + pool->tx_metadata_len > pool->chunk_size)
 		return false;
 
 	if (addr >= pool->addrs_cnt)
@@ -158,27 +168,42 @@ static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
 
 	if (xp_unused_options_set(desc->options))
 		return false;
+
 	return true;
 }
 
 static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
 					      struct xdp_desc *desc)
 {
-	u64 addr = xp_unaligned_add_offset_to_addr(desc->addr) - pool->tx_metadata_len;
-	u64 len = desc->len + pool->tx_metadata_len;
+	u64 len = desc->len;
+	u64 addr, end;
 
-	if (!desc->len)
+	if (!len)
 		return false;
 
+	/* Can't overflow: @len is guaranteed to be <= ``U32_MAX`` */
+	len += pool->tx_metadata_len;
 	if (len > pool->chunk_size)
 		return false;
 
-	if (addr >= pool->addrs_cnt || addr + len > pool->addrs_cnt ||
-	    xp_desc_crosses_non_contig_pg(pool, addr, len))
+	/* Can overflow if desc->addr is close to 0 */
+	if (check_sub_overflow(xp_unaligned_add_offset_to_addr(desc->addr),
+			       pool->tx_metadata_len, &addr))
+		return false;
+
+	if (addr >= pool->addrs_cnt)
+		return false;
+
+	/* Can overflow if pool->addrs_cnt is high enough */
+	if (check_add_overflow(addr, len, &end) || end > pool->addrs_cnt)
+		return false;
+
+	if (xp_desc_crosses_non_contig_pg(pool, addr, len))
 		return false;
 
 	if (xp_unused_options_set(desc->options))
 		return false;
+
 	return true;
 }
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 6f99fd2d966c6..1e2f5ecd63248 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2502,6 +2502,8 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
 		for (h = 0; h < range; h++) {
 			u32 spi = (low == high) ? low :
 				  get_random_u32_inclusive(low, high);
+			if (spi == 0)
+				goto next;
 			newspi = htonl(spi);
 
 			spin_lock_bh(&net->xfrm.xfrm_state_lock);
@@ -2517,6 +2519,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
 				xfrm_state_put(x0);
 			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 
+next:
 			if (signal_pending(current)) {
 				err = -ERESTARTSYS;
 				goto unlock;
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index a80783fcbe042..8b97919a86e2d 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -33,3 +33,4 @@ const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO;
 const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM;
 const gfp_t RUST_CONST_HELPER___GFP_NOWARN = ___GFP_NOWARN;
 const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL;
+const vm_flags_t RUST_CONST_HELPER_VM_MERGEABLE = VM_MERGEABLE;
diff --git a/rust/kernel/block/mq/gen_disk.rs b/rust/kernel/block/mq/gen_disk.rs
index c6df153ebb886..8cd47ddd1dbb5 100644
--- a/rust/kernel/block/mq/gen_disk.rs
+++ b/rust/kernel/block/mq/gen_disk.rs
@@ -3,7 +3,7 @@
 //! Generic disk abstraction.
 //!
 //! C header: [`include/linux/blkdev.h`](srctree/include/linux/blkdev.h)
-//! C header: [`include/linux/blk_mq.h`](srctree/include/linux/blk_mq.h)
+//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
 
 use crate::block::mq::{raw_writer::RawWriter, Operations, TagSet};
 use crate::{bindings, error::from_err_ptr, error::Result, sync::Arc};
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index ef12c8f929eda..70c67061cc448 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -191,10 +191,17 @@ inline bool is_a_helper::test(const_gimple gs)
 }
 #endif
 
+#if BUILDING_GCC_VERSION < 16000
 #define TODO_verify_ssa TODO_verify_il
 #define TODO_verify_flow TODO_verify_il
 #define TODO_verify_stmts TODO_verify_il
 #define TODO_verify_rtl_sharing TODO_verify_il
+#else
+#define TODO_verify_ssa 0
+#define TODO_verify_flow 0
+#define TODO_verify_stmts 0
+#define TODO_verify_rtl_sharing 0
+#endif
 
 #define INSN_DELETED_P(insn) (insn)->deleted()
diff --git a/security/Kconfig b/security/Kconfig
index 28e685f53bd1a..ce9f1a651ccc3 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -264,6 +264,7 @@ endchoice
 
 config LSM
 	string "Ordered list of enabled LSMs"
+	depends on SECURITY
 	default "landlock,lockdown,yama,loadpin,safesetid,smack,selinux,tomoyo,apparmor,ipe,bpf" if DEFAULT_SECURITY_SMACK
 	default "landlock,lockdown,yama,loadpin,safesetid,apparmor,selinux,smack,tomoyo,ipe,bpf" if DEFAULT_SECURITY_APPARMOR
 	default "landlock,lockdown,yama,loadpin,safesetid,tomoyo,ipe,bpf" if DEFAULT_SECURITY_TOMOYO
diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
index 89c9798d18007..e73f2c6c817a0 100644
--- a/security/keys/trusted-keys/trusted_tpm1.c
+++ b/security/keys/trusted-keys/trusted_tpm1.c
@@ -7,6 +7,7 @@
 */
 
 #include
+#include <crypto/utils.h>
 #include
 #include
 #include
@@ -241,7 +242,7 @@ int TSS_checkhmac1(unsigned char *buffer,
 	if (ret < 0)
 		goto out;
 
-	if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
+	if (crypto_memneq(testhmac, authdata, SHA1_DIGEST_SIZE))
 		ret = -EINVAL;
 out:
 	kfree_sensitive(sdesc);
@@ -334,7 +335,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
 			  TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0);
 	if (ret < 0)
 		goto out;
-	if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
+	if (crypto_memneq(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
 		ret = -EINVAL;
 		goto out;
 	}
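The sctp and trusted-keys hunks above swap memcmp() for crypto_memneq() when checking digests. memcmp() may return as soon as one byte differs, so its run time leaks how long a forged MAC matched; crypto_memneq() touches every byte before answering. A hedged sketch of the call pattern (the helper name and buffers are illustrative, not from the patch):

	#include <crypto/utils.h>

	/* Constant-time digest check: returns 0 only on a full match. */
	static int check_digest(const u8 *computed, const u8 *received, size_t len)
	{
		/* crypto_memneq() returns non-zero if the buffers differ,
		 * with a run time independent of the mismatch position.
		 */
		return crypto_memneq(computed, received, len) ? -EBADMSG : 0;
	}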
@@ -343,7 +344,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
 			  TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0);
 	if (ret < 0)
 		goto out;
-	if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
+	if (crypto_memneq(testhmac2, authdata2, SHA1_DIGEST_SIZE))
 		ret = -EINVAL;
 out:
 	kfree_sensitive(sdesc);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 9b91f68b3fff0..d15de21f6ebf0 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -84,19 +84,24 @@ void snd_pcm_group_init(struct snd_pcm_group *group)
 }
 
 /* define group lock helpers */
-#define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
+#define DEFINE_PCM_GROUP_LOCK(action, bh_lock, bh_unlock, mutex_action) \
 static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
 { \
-	if (nonatomic) \
+	if (nonatomic) { \
 		mutex_ ## mutex_action(&group->mutex); \
-	else \
-		spin_ ## action(&group->lock); \
-}
-
-DEFINE_PCM_GROUP_LOCK(lock, lock);
-DEFINE_PCM_GROUP_LOCK(unlock, unlock);
-DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
-DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
+	} else { \
+		if (IS_ENABLED(CONFIG_PREEMPT_RT) && bh_lock) \
+			local_bh_disable(); \
+		spin_ ## action(&group->lock); \
+		if (IS_ENABLED(CONFIG_PREEMPT_RT) && bh_unlock) \
+			local_bh_enable(); \
+	} \
+}
+
+DEFINE_PCM_GROUP_LOCK(lock, false, false, lock);
+DEFINE_PCM_GROUP_LOCK(unlock, false, false, unlock);
+DEFINE_PCM_GROUP_LOCK(lock_irq, true, false, lock);
+DEFINE_PCM_GROUP_LOCK(unlock_irq, false, true, unlock);
 
 /**
 * snd_pcm_stream_lock - Lock the PCM stream
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index 775db3fc4959f..ec10270c2cce3 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -32,7 +32,7 @@
 *	allows 5 times as large as IEC 61883-6 defines.
 * @CIP_HEADER_WITHOUT_EOH: Only for in-stream. CIP Header doesn't include
 *	valid EOH.
- * @CIP_NO_HEADERS: a lack of headers in packets
+ * @CIP_NO_HEADER: a lack of headers in packets
 * @CIP_UNALIGHED_DBC: Only for in-stream. The value of dbc is not alighed to
 *	the value of current SYT_INTERVAL; e.g. initial value is not zero.
 * @CIP_UNAWARE_SYT: For outgoing packet, the value in SYT field of CIP is 0xffff.
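On PREEMPT_RT a spinlock is a sleeping lock and spin_lock_irq() no longer keeps softirqs out, which is why the reworked DEFINE_PCM_GROUP_LOCK macro above grows bh_lock/bh_unlock arguments. Expanding it by hand for (lock_irq, true, false, lock) yields roughly the following helper; the expansion is written out here purely for illustration:

	static void snd_pcm_group_lock_irq(struct snd_pcm_group *group,
					   bool nonatomic)
	{
		if (nonatomic) {
			mutex_lock(&group->mutex);
		} else {
			/* bh_lock == true: on PREEMPT_RT, disable softirqs
			 * explicitly before taking the (sleeping) spinlock
			 */
			if (IS_ENABLED(CONFIG_PREEMPT_RT))
				local_bh_disable();
			spin_lock_irq(&group->lock);
		}
	}

The matching unlock_irq helper passes bh_unlock instead, so local_bh_enable() runs only after spin_unlock_irq() has released the lock.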
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5f061d2d9fc96..a41df821e15f7 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7272,6 +7272,11 @@ static void cs35l41_fixup_spi_two(struct hda_codec *codec, const struct hda_fixup *fix, int action)
 	comp_generic_fixup(codec, action, "spi", "CSC3551", "-%s:00-cs35l41-hda.%d", 2);
 }
 
+static void cs35l41_fixup_spi_one(struct hda_codec *codec, const struct hda_fixup *fix, int action)
+{
+	comp_generic_fixup(codec, action, "spi", "CSC3551", "-%s:00-cs35l41-hda.%d", 1);
+}
+
 static void cs35l41_fixup_spi_four(struct hda_codec *codec, const struct hda_fixup *fix, int action)
 {
 	comp_generic_fixup(codec, action, "spi", "CSC3551", "-%s:00-cs35l41-hda.%d", 4);
@@ -7956,6 +7961,7 @@ enum {
 	ALC287_FIXUP_CS35L41_I2C_2,
 	ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED,
 	ALC287_FIXUP_CS35L41_I2C_4,
+	ALC245_FIXUP_CS35L41_SPI_1,
 	ALC245_FIXUP_CS35L41_SPI_2,
 	ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED,
 	ALC245_FIXUP_CS35L41_SPI_4,
@@ -10067,6 +10073,10 @@ static const struct hda_fixup alc269_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = cs35l41_fixup_spi_two,
 	},
+	[ALC245_FIXUP_CS35L41_SPI_1] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = cs35l41_fixup_spi_one,
+	},
 	[ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = cs35l41_fixup_spi_two,
@@ -11001,6 +11011,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
+	SND_PCI_QUIRK(0x1043, 0x88f4, "ASUS NUC14LNS", ALC245_FIXUP_CS35L41_SPI_1),
 	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
 	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 9d95ecb299aed..a99acd1125e74 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -316,7 +316,7 @@ static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
 /* low-level dsp access */
 int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
 {
-	u16 ret;
+	int ret;
 
 	mutex_lock(&chip->msg_lock);
 
@@ -330,10 +330,10 @@ int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
 
 int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
 {
-	u16 ret = 0;
 	u32 freq_raw = 0;
 	u32 freq = 0;
 	u32 frequency = 0;
+	int ret;
 
 	mutex_lock(&chip->msg_lock);
diff --git a/sound/soc/amd/acp/acp-sdw-sof-mach.c b/sound/soc/amd/acp/acp-sdw-sof-mach.c
index 99a244f495bd3..876f0b7fcd3de 100644
--- a/sound/soc/amd/acp/acp-sdw-sof-mach.c
+++ b/sound/soc/amd/acp/acp-sdw-sof-mach.c
@@ -216,9 +216,9 @@ static int create_sdw_dailink(struct snd_soc_card *card,
 			cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
 							"SDW%d Pin%d",
 							link_num, cpu_pin_id);
-			dev_dbg(dev, "cpu->dai_name:%s\n", cpus->dai_name);
 			if (!cpus->dai_name)
 				return -ENOMEM;
+			dev_dbg(dev, "cpu->dai_name:%s\n", cpus->dai_name);
 
 			codec_maps[j].cpu = 0;
 			codec_maps[j].codec = j;
diff --git a/sound/soc/amd/acp/amd.h b/sound/soc/amd/acp/amd.h
index 854269fea875f..aa0aa64202fe4 100644
--- a/sound/soc/amd/acp/amd.h
+++ b/sound/soc/amd/acp/amd.h
@@ -135,7 +135,7 @@
 #define PDM_DMA_INTR_MASK	0x10000
 #define PDM_DEC_64		0x2
 #define PDM_CLK_FREQ_MASK	0x07
-#define PDM_MISC_CTRL_MASK	0x10
+#define PDM_MISC_CTRL_MASK	0x18
 #define PDM_ENABLE		0x01
 #define PDM_DISABLE		0x00
 #define DMA_EN_MASK		0x02
diff --git a/sound/soc/codecs/idt821034.c b/sound/soc/codecs/idt821034.c
index cb7a68c799f8f..401d0897b8ab4 100644
--- a/sound/soc/codecs/idt821034.c
+++ b/sound/soc/codecs/idt821034.c
@@ -548,14 +548,14 @@ static int idt821034_kctrl_mute_put(struct snd_kcontrol *kcontrol,
 	return ret;
 }
 
-static const DECLARE_TLV_DB_LINEAR(idt821034_gain_in, -6520, 1306);
-#define IDT821034_GAIN_IN_MIN_RAW	1    /* -65.20 dB -> 10^(-65.2/20.0) * 1820 = 1 */
-#define IDT821034_GAIN_IN_MAX_RAW	8191 /* 13.06 dB -> 10^(13.06/20.0) * 1820 = 8191 */
+static const DECLARE_TLV_DB_LINEAR(idt821034_gain_in, -300, 1300);
+#define IDT821034_GAIN_IN_MIN_RAW	1288 /* -3.0 dB -> 10^(-3.0/20.0) * 1820 = 1288 */
+#define IDT821034_GAIN_IN_MAX_RAW	8130 /* 13.0 dB -> 10^(13.0/20.0) * 1820 = 8130 */
 #define IDT821034_GAIN_IN_INIT_RAW	1820 /* 0dB -> 10^(0/20) * 1820 = 1820 */
 
-static const DECLARE_TLV_DB_LINEAR(idt821034_gain_out, -6798, 1029);
-#define IDT821034_GAIN_OUT_MIN_RAW	1    /* -67.98 dB -> 10^(-67.98/20.0) * 2506 = 1*/
-#define IDT821034_GAIN_OUT_MAX_RAW	8191 /* 10.29 dB -> 10^(10.29/20.0) * 2506 = 8191 */
+static const DECLARE_TLV_DB_LINEAR(idt821034_gain_out, -1300, 300);
+#define IDT821034_GAIN_OUT_MIN_RAW	561  /* -13.0 dB -> 10^(-13.0/20.0) * 2506 = 561 */
+#define IDT821034_GAIN_OUT_MAX_RAW	3540 /* 3.0 dB -> 10^(3.0/20.0) * 2506 = 3540 */
 #define IDT821034_GAIN_OUT_INIT_RAW	2506 /* 0dB -> 10^(0/20) * 2506 = 2506 */
 
 static const struct snd_kcontrol_new idt821034_controls[] = {
diff --git a/sound/soc/codecs/nau8821.c b/sound/soc/codecs/nau8821.c
index de5c4db05c8f8..bfb719ca4c2cf 100644
--- a/sound/soc/codecs/nau8821.c
+++ b/sound/soc/codecs/nau8821.c
@@ -26,7 +26,8 @@
 #include
 #include "nau8821.h"
 
-#define NAU8821_JD_ACTIVE_HIGH		BIT(0)
+#define NAU8821_QUIRK_JD_ACTIVE_HIGH	BIT(0)
+#define NAU8821_QUIRK_JD_DB_BYPASS	BIT(1)
 
 static int nau8821_quirk;
 static int quirk_override = -1;
@@ -1022,12 +1023,17 @@ static bool nau8821_is_jack_inserted(struct regmap *regmap)
 	return active_high == is_high;
 }
 
-static void nau8821_int_status_clear_all(struct regmap *regmap)
+static void nau8821_irq_status_clear(struct regmap *regmap, int active_irq)
 {
-	int active_irq, clear_irq, i;
+	int clear_irq, i;
 
-	/* Reset the intrruption status from rightmost bit if the corres-
-	 * ponding irq event occurs.
+	if (active_irq) {
+		regmap_write(regmap, NAU8821_R11_INT_CLR_KEY_STATUS, active_irq);
+		return;
+	}
+
+	/* Reset the interruption status from rightmost bit if the
+	 * corresponding irq event occurs.
 	 */
 	regmap_read(regmap, NAU8821_R10_IRQ_STATUS, &active_irq);
 	for (i = 0; i < NAU8821_REG_DATA_LEN; i++) {
@@ -1054,7 +1060,7 @@ static void nau8821_eject_jack(struct nau8821 *nau8821)
 	snd_soc_dapm_sync(dapm);
 
 	/* Clear all interruption status */
-	nau8821_int_status_clear_all(regmap);
+	nau8821_irq_status_clear(regmap, 0);
 
 	/* Enable the insertion interruption, disable the ejection inter-
 	 * ruption, and then bypass de-bounce circuit.
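The renamed helper above folds both clearing modes into one function: a non-zero active_irq writes exactly those bits to the clear register, while 0 falls back to walking the whole status register as before. Usage, sketched from the call sites in this patch:

	/* clear just the events the IRQ handler consumed */
	nau8821_irq_status_clear(regmap, active_irq);

	/* clear everything, e.g. on jack ejection or resume */
	nau8821_irq_status_clear(regmap, 0);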
@@ -1161,9 +1167,10 @@ static void nau8821_setup_inserted_irq(struct nau8821 *nau8821)
 	regmap_update_bits(regmap, NAU8821_R1D_I2S_PCM_CTRL2,
 		NAU8821_I2S_MS_MASK, NAU8821_I2S_MS_SLAVE);
 
-	/* Not bypass de-bounce circuit */
-	regmap_update_bits(regmap, NAU8821_R0D_JACK_DET_CTRL,
-		NAU8821_JACK_DET_DB_BYPASS, 0);
+	/* Do not bypass de-bounce circuit */
+	if (!(nau8821_quirk & NAU8821_QUIRK_JD_DB_BYPASS))
+		regmap_update_bits(regmap, NAU8821_R0D_JACK_DET_CTRL,
+			NAU8821_JACK_DET_DB_BYPASS, 0);
 
 	regmap_update_bits(regmap, NAU8821_R0F_INTERRUPT_MASK,
 		NAU8821_IRQ_EJECT_EN, 0);
@@ -1186,6 +1193,7 @@ static irqreturn_t nau8821_interrupt(int irq, void *data)
 
 	if ((active_irq & NAU8821_JACK_EJECT_IRQ_MASK) ==
 		NAU8821_JACK_EJECT_DETECTED) {
+		cancel_work_sync(&nau8821->jdet_work);
 		regmap_update_bits(regmap, NAU8821_R71_ANALOG_ADC_1,
 			NAU8821_MICDET_MASK, NAU8821_MICDET_DIS);
 		nau8821_eject_jack(nau8821);
@@ -1200,11 +1208,11 @@ static irqreturn_t nau8821_interrupt(int irq, void *data)
 		clear_irq = NAU8821_KEY_RELEASE_IRQ;
 	} else if ((active_irq & NAU8821_JACK_INSERT_IRQ_MASK) ==
 		NAU8821_JACK_INSERT_DETECTED) {
+		cancel_work_sync(&nau8821->jdet_work);
 		regmap_update_bits(regmap, NAU8821_R71_ANALOG_ADC_1,
 			NAU8821_MICDET_MASK, NAU8821_MICDET_EN);
 		if (nau8821_is_jack_inserted(regmap)) {
 			/* detect microphone and jack type */
-			cancel_work_sync(&nau8821->jdet_work);
 			schedule_work(&nau8821->jdet_work);
 			/* Turn off insertion interruption at manual mode */
 			regmap_update_bits(regmap,
@@ -1522,7 +1530,7 @@ static int nau8821_resume_setup(struct nau8821 *nau8821)
 	nau8821_configure_sysclk(nau8821, NAU8821_CLK_DIS, 0);
 	if (nau8821->irq) {
 		/* Clear all interruption status */
-		nau8821_int_status_clear_all(regmap);
+		nau8821_irq_status_clear(regmap, 0);
 
 		/* Enable both insertion and ejection interruptions, and then
 		 * bypass de-bounce circuit.
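Both branches above now cancel the pending jack-detection work before reprogramming MICDET, rather than cancelling it only on the insertion path, so a stale detection pass can no longer race the eject handling. The general shape of the pattern, as it might look in a threaded interrupt handler (the names are illustrative, not from the driver):

	static irqreturn_t jack_irq_thread(int irq, void *data)
	{
		struct jack_priv *priv = data;	/* hypothetical driver state */

		/* Safe in a threaded handler (it may sleep): make sure a
		 * previously scheduled detection pass has finished before
		 * the hardware state is changed underneath it.
		 */
		cancel_work_sync(&priv->jdet_work);

		/* ... reprogram the detection hardware ... */

		schedule_work(&priv->jdet_work);
		return IRQ_HANDLED;
	}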
@@ -1857,7 +1865,23 @@ static const struct dmi_system_id nau8821_quirk_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"), DMI_MATCH(DMI_BOARD_NAME, "CW14Q01P-V2"), }, - .driver_data = (void *)(NAU8821_JD_ACTIVE_HIGH), + .driver_data = (void *)(NAU8821_QUIRK_JD_ACTIVE_HIGH), + }, + { + /* Valve Steam Deck LCD */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Valve"), + DMI_MATCH(DMI_PRODUCT_NAME, "Jupiter"), + }, + .driver_data = (void *)(NAU8821_QUIRK_JD_DB_BYPASS), + }, + { + /* Valve Steam Deck OLED */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Valve"), + DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"), + }, + .driver_data = (void *)(NAU8821_QUIRK_JD_DB_BYPASS), }, {} }; @@ -1899,9 +1923,12 @@ static int nau8821_i2c_probe(struct i2c_client *i2c) nau8821_check_quirks(); - if (nau8821_quirk & NAU8821_JD_ACTIVE_HIGH) + if (nau8821_quirk & NAU8821_QUIRK_JD_ACTIVE_HIGH) nau8821->jkdet_polarity = 0; + if (nau8821_quirk & NAU8821_QUIRK_JD_DB_BYPASS) + dev_dbg(dev, "Force bypassing jack detection debounce circuit\n"); + nau8821_print_device_properties(nau8821); nau8821_reset_chip(nau8821->regmap); diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c index ce2e88e066f3e..d773c96e2543c 100644 --- a/sound/soc/codecs/rt5682s.c +++ b/sound/soc/codecs/rt5682s.c @@ -653,14 +653,15 @@ static void rt5682s_sar_power_mode(struct snd_soc_component *component, int mode switch (mode) { case SAR_PWR_SAVING: snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_3, - RT5682S_CBJ_IN_BUF_MASK, RT5682S_CBJ_IN_BUF_DIS); + RT5682S_CBJ_IN_BUF_MASK, RT5682S_CBJ_IN_BUF_EN); snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1, - RT5682S_MB1_PATH_MASK | RT5682S_MB2_PATH_MASK, - RT5682S_CTRL_MB1_REG | RT5682S_CTRL_MB2_REG); + RT5682S_MB1_PATH_MASK | RT5682S_MB2_PATH_MASK | + RT5682S_VREF_POW_MASK, RT5682S_CTRL_MB1_FSM | + RT5682S_CTRL_MB2_FSM | RT5682S_VREF_POW_FSM); snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK | RT5682S_SAR_SEL_MB1_2_CTL_MASK, RT5682S_SAR_BUTDET_DIS | - RT5682S_SAR_BUTDET_POW_SAV | RT5682S_SAR_SEL_MB1_2_MANU); + RT5682S_SAR_BUTDET_POW_NORM | RT5682S_SAR_SEL_MB1_2_MANU); usleep_range(5000, 5500); snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_BUTDET_MASK, RT5682S_SAR_BUTDET_EN); @@ -688,7 +689,7 @@ static void rt5682s_sar_power_mode(struct snd_soc_component *component, int mode snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK | RT5682S_SAR_SEL_MB1_2_CTL_MASK, RT5682S_SAR_BUTDET_DIS | - RT5682S_SAR_BUTDET_POW_SAV | RT5682S_SAR_SEL_MB1_2_MANU); + RT5682S_SAR_BUTDET_POW_NORM | RT5682S_SAR_SEL_MB1_2_MANU); break; default: dev_err(component->dev, "Invalid SAR Power mode: %d\n", mode); @@ -725,7 +726,7 @@ static void rt5682s_disable_push_button_irq(struct snd_soc_component *component) snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK | RT5682S_SAR_SEL_MB1_2_CTL_MASK, RT5682S_SAR_BUTDET_DIS | - RT5682S_SAR_BUTDET_POW_SAV | RT5682S_SAR_SEL_MB1_2_MANU); + RT5682S_SAR_BUTDET_POW_NORM | RT5682S_SAR_SEL_MB1_2_MANU); } /** @@ -786,7 +787,7 @@ static int rt5682s_headset_detect(struct snd_soc_component *component, int jack_ jack_type = SND_JACK_HEADSET; snd_soc_component_write(component, RT5682S_SAR_IL_CMD_3, 0x024c); snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1, - RT5682S_FAST_OFF_MASK, RT5682S_FAST_OFF_EN); + RT5682S_FAST_OFF_MASK, 
RT5682S_FAST_OFF_DIS); snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_SEL_MB1_2_MASK, val << RT5682S_SAR_SEL_MB1_2_SFT); rt5682s_enable_push_button_irq(component); @@ -966,7 +967,7 @@ static int rt5682s_set_jack_detect(struct snd_soc_component *component, RT5682S_EMB_JD_MASK | RT5682S_DET_TYPE | RT5682S_POL_FAST_OFF_MASK | RT5682S_MIC_CAP_MASK, RT5682S_EMB_JD_EN | RT5682S_DET_TYPE | - RT5682S_POL_FAST_OFF_HIGH | RT5682S_MIC_CAP_HS); + RT5682S_POL_FAST_OFF_LOW | RT5682S_MIC_CAP_HS); regmap_update_bits(rt5682s->regmap, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_POW_MASK, RT5682S_SAR_POW_EN); regmap_update_bits(rt5682s->regmap, RT5682S_GPIO_CTRL_1, diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c index c7f1b28f3b230..01f8d1296b183 100644 --- a/sound/soc/codecs/wcd934x.c +++ b/sound/soc/codecs/wcd934x.c @@ -5845,6 +5845,13 @@ static const struct snd_soc_component_driver wcd934x_component_drv = { .endianness = 1, }; +static void wcd934x_put_device_action(void *data) +{ + struct device *dev = data; + + put_device(dev); +} + static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd) { struct device *dev = &wcd->sdev->dev; @@ -5861,11 +5868,13 @@ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd) return dev_err_probe(dev, -EINVAL, "Unable to get SLIM Interface device\n"); slim_get_logical_addr(wcd->sidev); - wcd->if_regmap = regmap_init_slimbus(wcd->sidev, + wcd->if_regmap = devm_regmap_init_slimbus(wcd->sidev, &wcd934x_ifc_regmap_config); - if (IS_ERR(wcd->if_regmap)) + if (IS_ERR(wcd->if_regmap)) { + put_device(&wcd->sidev->dev); return dev_err_probe(dev, PTR_ERR(wcd->if_regmap), "Failed to allocate ifc register map\n"); + } of_property_read_u32(dev->parent->of_node, "qcom,dmic-sample-rate", &wcd->dmic_sample_rate); @@ -5907,6 +5916,10 @@ static int wcd934x_codec_probe(struct platform_device *pdev) if (ret) return ret; + ret = devm_add_action_or_reset(dev, wcd934x_put_device_action, &wcd->sidev->dev); + if (ret) + return ret; + /* set default rate 9P6MHz */ regmap_update_bits(wcd->regmap, WCD934X_CODEC_RPM_CLK_MCLK_CFG, WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK, diff --git a/sound/soc/codecs/wcd937x.c b/sound/soc/codecs/wcd937x.c index 1df827a084cac..17ddd91eac5fc 100644 --- a/sound/soc/codecs/wcd937x.c +++ b/sound/soc/codecs/wcd937x.c @@ -2039,9 +2039,9 @@ static const struct snd_kcontrol_new wcd937x_snd_controls[] = { SOC_ENUM_EXT("RX HPH Mode", rx_hph_mode_mux_enum, wcd937x_rx_hph_mode_get, wcd937x_rx_hph_mode_put), - SOC_SINGLE_EXT("HPHL_COMP Switch", SND_SOC_NOPM, 0, 1, 0, + SOC_SINGLE_EXT("HPHL_COMP Switch", WCD937X_COMP_L, 0, 1, 0, wcd937x_get_compander, wcd937x_set_compander), - SOC_SINGLE_EXT("HPHR_COMP Switch", SND_SOC_NOPM, 1, 1, 0, + SOC_SINGLE_EXT("HPHR_COMP Switch", WCD937X_COMP_R, 1, 1, 0, wcd937x_get_compander, wcd937x_set_compander), SOC_SINGLE_TLV("HPHL Volume", WCD937X_HPH_L_EN, 0, 20, 1, line_gain), diff --git a/sound/soc/codecs/wcd937x.h b/sound/soc/codecs/wcd937x.h index 4afa48dcaf743..7bfd0f630fcdd 100644 --- a/sound/soc/codecs/wcd937x.h +++ b/sound/soc/codecs/wcd937x.h @@ -548,21 +548,21 @@ int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd, struct device *wcd937x_sdw_device_get(struct device_node *np); #else -int wcd937x_sdw_free(struct wcd937x_sdw_priv *wcd, +static inline int wcd937x_sdw_free(struct wcd937x_sdw_priv *wcd, struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { return -EOPNOTSUPP; } -int wcd937x_sdw_set_sdw_stream(struct wcd937x_sdw_priv *wcd, +static inline int 
wcd937x_sdw_set_sdw_stream(struct wcd937x_sdw_priv *wcd, struct snd_soc_dai *dai, void *stream, int direction) { return -EOPNOTSUPP; } -int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd, +static inline int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index d3327bc237b5f..7975dc0ceb351 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -47,7 +47,8 @@ enum { BYT_CHT_ES8316_INTMIC_IN2_MAP, }; -#define BYT_CHT_ES8316_MAP(quirk) ((quirk) & GENMASK(3, 0)) +#define BYT_CHT_ES8316_MAP_MASK GENMASK(3, 0) +#define BYT_CHT_ES8316_MAP(quirk) ((quirk) & BYT_CHT_ES8316_MAP_MASK) #define BYT_CHT_ES8316_SSP0 BIT(16) #define BYT_CHT_ES8316_MONO_SPEAKER BIT(17) #define BYT_CHT_ES8316_JD_INVERTED BIT(18) @@ -60,10 +61,23 @@ MODULE_PARM_DESC(quirk, "Board-specific quirk override"); static void log_quirks(struct device *dev) { - if (BYT_CHT_ES8316_MAP(quirk) == BYT_CHT_ES8316_INTMIC_IN1_MAP) + int map; + + map = BYT_CHT_ES8316_MAP(quirk); + switch (map) { + case BYT_CHT_ES8316_INTMIC_IN1_MAP: dev_info(dev, "quirk IN1_MAP enabled"); - if (BYT_CHT_ES8316_MAP(quirk) == BYT_CHT_ES8316_INTMIC_IN2_MAP) + break; + case BYT_CHT_ES8316_INTMIC_IN2_MAP: dev_info(dev, "quirk IN2_MAP enabled"); + break; + default: + dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to INTMIC_IN1_MAP\n", map); + quirk &= ~BYT_CHT_ES8316_MAP_MASK; + quirk |= BYT_CHT_ES8316_INTMIC_IN1_MAP; + break; + } + if (quirk & BYT_CHT_ES8316_SSP0) dev_info(dev, "quirk SSP0 enabled"); if (quirk & BYT_CHT_ES8316_MONO_SPEAKER) diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index b6434b4731261..d6991864c5a49 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -68,7 +68,8 @@ enum { BYT_RT5640_OVCD_SF_1P5 = (RT5640_OVCD_SF_1P5 << 13), }; -#define BYT_RT5640_MAP(quirk) ((quirk) & GENMASK(3, 0)) +#define BYT_RT5640_MAP_MASK GENMASK(3, 0) +#define BYT_RT5640_MAP(quirk) ((quirk) & BYT_RT5640_MAP_MASK) #define BYT_RT5640_JDSRC(quirk) (((quirk) & GENMASK(7, 4)) >> 4) #define BYT_RT5640_OVCD_TH(quirk) (((quirk) & GENMASK(12, 8)) >> 8) #define BYT_RT5640_OVCD_SF(quirk) (((quirk) & GENMASK(14, 13)) >> 13) @@ -140,7 +141,9 @@ static void log_quirks(struct device *dev) dev_info(dev, "quirk NO_INTERNAL_MIC_MAP enabled\n"); break; default: - dev_err(dev, "quirk map 0x%x is not supported, microphone input will not work\n", map); + dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to DMIC1_MAP\n", map); + byt_rt5640_quirk &= ~BYT_RT5640_MAP_MASK; + byt_rt5640_quirk |= BYT_RT5640_DMIC1_MAP; break; } if (byt_rt5640_quirk & BYT_RT5640_HSMIC2_ON_IN1) diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c index 8e4b821efe927..6860ac41e6b32 100644 --- a/sound/soc/intel/boards/bytcr_rt5651.c +++ b/sound/soc/intel/boards/bytcr_rt5651.c @@ -58,7 +58,8 @@ enum { BYT_RT5651_OVCD_SF_1P5 = (RT5651_OVCD_SF_1P5 << 13), }; -#define BYT_RT5651_MAP(quirk) ((quirk) & GENMASK(3, 0)) +#define BYT_RT5651_MAP_MASK GENMASK(3, 0) +#define BYT_RT5651_MAP(quirk) ((quirk) & BYT_RT5651_MAP_MASK) #define BYT_RT5651_JDSRC(quirk) (((quirk) & GENMASK(7, 4)) >> 4) #define BYT_RT5651_OVCD_TH(quirk) (((quirk) & GENMASK(12, 8)) >> 8) #define BYT_RT5651_OVCD_SF(quirk) (((quirk) & GENMASK(14, 13)) >> 13) @@ -100,14 
+101,29 @@ MODULE_PARM_DESC(quirk, "Board-specific quirk override"); static void log_quirks(struct device *dev) { - if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_DMIC_MAP) + int map; + + map = BYT_RT5651_MAP(byt_rt5651_quirk); + switch (map) { + case BYT_RT5651_DMIC_MAP: dev_info(dev, "quirk DMIC_MAP enabled"); - if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN1_MAP) + break; + case BYT_RT5651_IN1_MAP: dev_info(dev, "quirk IN1_MAP enabled"); - if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN2_MAP) + break; + case BYT_RT5651_IN2_MAP: dev_info(dev, "quirk IN2_MAP enabled"); - if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN1_IN2_MAP) + break; + case BYT_RT5651_IN1_IN2_MAP: dev_info(dev, "quirk IN1_IN2_MAP enabled"); + break; + default: + dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to DMIC_MAP\n", map); + byt_rt5651_quirk &= ~BYT_RT5651_MAP_MASK; + byt_rt5651_quirk |= BYT_RT5651_DMIC_MAP; + break; + } + if (BYT_RT5651_JDSRC(byt_rt5651_quirk)) { dev_info(dev, "quirk realtek,jack-detect-source %ld\n", BYT_RT5651_JDSRC(byt_rt5651_quirk)); diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index 5911a05586516..00d840d5e585c 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -741,7 +741,7 @@ static int create_sdw_dailink(struct snd_soc_card *card, (*codec_conf)++; } - if (sof_end->include_sidecar) { + if (sof_end->include_sidecar && sof_end->codec_info->add_sidecar) { ret = sof_end->codec_info->add_sidecar(card, dai_links, codec_conf); if (ret) return ret; diff --git a/sound/soc/qcom/qdsp6/topology.c b/sound/soc/qcom/qdsp6/topology.c index 83319a928f291..01bb1bdee5cec 100644 --- a/sound/soc/qcom/qdsp6/topology.c +++ b/sound/soc/qcom/qdsp6/topology.c @@ -587,8 +587,8 @@ static int audioreach_widget_load_module_common(struct snd_soc_component *compon return PTR_ERR(cont); mod = audioreach_parse_common_tokens(apm, cont, &tplg_w->priv, w); - if (IS_ERR(mod)) - return PTR_ERR(mod); + if (IS_ERR_OR_NULL(mod)) + return mod ? PTR_ERR(mod) : -ENODEV; dobj = &w->dobj; dobj->private = mod; diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c index f6e24edd7adbe..898e4fcde2dde 100644 --- a/sound/soc/sof/intel/hda-pcm.c +++ b/sound/soc/sof/intel/hda-pcm.c @@ -29,6 +29,8 @@ #define SDnFMT_BITS(x) ((x) << 4) #define SDnFMT_CHAN(x) ((x) << 0) +#define HDA_MAX_PERIOD_TIME_HEADROOM 10 + static bool hda_always_enable_dmi_l1; module_param_named(always_enable_dmi_l1, hda_always_enable_dmi_l1, bool, 0444); MODULE_PARM_DESC(always_enable_dmi_l1, "SOF HDA always enable DMI l1"); @@ -276,19 +278,30 @@ int hda_dsp_pcm_open(struct snd_sof_dev *sdev, * On playback start the DMA will transfer dsp_max_burst_size_in_ms * amount of data in one initial burst to fill up the host DMA buffer. * Consequent DMA burst sizes are shorter and their length can vary. - * To make sure that userspace allocate large enough ALSA buffer we need - * to place a constraint on the buffer time. + * To avoid an immediate xrun by the initial burst we need to place + * a constraint on the period size (via PERIOD_TIME) to cover the size of + * the host buffer. + * We need to add headroom of max 10ms as the firmware needs time to + * settle to the 1ms pacing and initially it can run faster for a few + * internal periods. * * On capture the DMA will transfer 1ms chunks. - * - * Exact dsp_max_burst_size_in_ms constraint is racy, so set the - * constraint to a minimum of 2x dsp_max_burst_size_in_ms.
*/ - if (spcm->stream[direction].dsp_max_burst_size_in_ms) + if (spcm->stream[direction].dsp_max_burst_size_in_ms) { + unsigned int period_time = spcm->stream[direction].dsp_max_burst_size_in_ms; + + /* + * add headroom over the maximum burst size to cover the time + * needed for the DMA pace to settle. + * Limit the headroom time to HDA_MAX_PERIOD_TIME_HEADROOM + */ + period_time += min(period_time, HDA_MAX_PERIOD_TIME_HEADROOM); + snd_pcm_hw_constraint_minmax(substream->runtime, - SNDRV_PCM_HW_PARAM_BUFFER_TIME, - spcm->stream[direction].dsp_max_burst_size_in_ms * USEC_PER_MSEC * 2, + SNDRV_PCM_HW_PARAM_PERIOD_TIME, + period_time * USEC_PER_MSEC, UINT_MAX); + } /* binding pcm substream to hda stream */ substream->runtime->private_data = &dsp_stream->hstream; diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c index 24f3cc7676142..2be0d02f9cf9b 100644 --- a/sound/soc/sof/intel/hda-stream.c +++ b/sound/soc/sof/intel/hda-stream.c @@ -1103,10 +1103,35 @@ u64 hda_dsp_get_stream_llp(struct snd_sof_dev *sdev, struct snd_soc_component *component, struct snd_pcm_substream *substream) { - struct hdac_stream *hstream = substream->runtime->private_data; - struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream); + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); + struct snd_soc_pcm_runtime *be_rtd = NULL; + struct hdac_ext_stream *hext_stream; + struct snd_soc_dai *cpu_dai; + struct snd_soc_dpcm *dpcm; u32 llp_l, llp_u; + /* + * The LLP needs to be read from the Link DMA used for this FE as it is + * allowed to use any combination of Link and Host channels + */ + for_each_dpcm_be(rtd, substream->stream, dpcm) { + if (dpcm->fe != rtd) + continue; + + be_rtd = dpcm->be; + } + + if (!be_rtd) + return 0; + + cpu_dai = snd_soc_rtd_to_cpu(be_rtd, 0); + if (!cpu_dai) + return 0; + + hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream); + if (!hext_stream) + return 0; + /* * The pplc_addr have been calculated during probe in * hda_dsp_stream_init(): diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c index e98b53b67d12b..19bbae725d838 100644 --- a/sound/soc/sof/ipc3-topology.c +++ b/sound/soc/sof/ipc3-topology.c @@ -2485,11 +2485,6 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif if (ret < 0) return ret; - /* free all the scheduler widgets now */ - ret = sof_ipc3_free_widgets_in_list(sdev, true, &dyn_widgets, verify); - if (ret < 0) - return ret; - /* * Tear down all pipelines associated with PCMs that did not get suspended * and unset the prepare flag so that they can be set up again during resume. @@ -2505,6 +2500,11 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif } } + /* free all the scheduler widgets now. 
This will also power down the secondary cores */ + ret = sof_ipc3_free_widgets_in_list(sdev, true, &dyn_widgets, verify); + if (ret < 0) + return ret; + list_for_each_entry(sroute, &sdev->route_list, list) sroute->setup = false; diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c index 9db2cdb321282..b11279f9d3c97 100644 --- a/sound/soc/sof/ipc4-pcm.c +++ b/sound/soc/sof/ipc4-pcm.c @@ -19,12 +19,14 @@ * struct sof_ipc4_timestamp_info - IPC4 timestamp info * @host_copier: the host copier of the pcm stream * @dai_copier: the dai copier of the pcm stream - * @stream_start_offset: reported by fw in memory window (converted to frames) - * @stream_end_offset: reported by fw in memory window (converted to frames) + * @stream_start_offset: reported by fw in memory window (converted to + * frames at host_copier sampling rate) + * @stream_end_offset: reported by fw in memory window (converted to + * frames at host_copier sampling rate) * @llp_offset: llp offset in memory window - * @boundary: wrap boundary should be used for the LLP frame counter * @delay: Calculated and stored in pointer callback. The stored value is - * returned in the delay callback. + * returned in the delay callback. Expressed in frames at host copier + * sampling rate. */ struct sof_ipc4_timestamp_info { struct sof_ipc4_copier *host_copier; @@ -33,7 +35,6 @@ struct sof_ipc4_timestamp_info { u64 stream_end_offset; u32 llp_offset; - u64 boundary; snd_pcm_sframes_t delay; }; @@ -48,6 +49,16 @@ struct sof_ipc4_pcm_stream_priv { bool chain_dma_allocated; }; +/* + * Modulus to use to compare host and link position counters. The sampling + * rates may be different, so the raw hardware counters will wrap + * around at different times. To calculate differences, use + * DELAY_BOUNDARY as a common modulus. This value must be smaller than + * the wrap-around point of any hardware counter, and larger than any + * valid delay measurement. + */ +#define DELAY_BOUNDARY U32_MAX + static inline struct sof_ipc4_timestamp_info * sof_ipc4_sps_to_time_info(struct snd_sof_pcm_stream *sps) { @@ -409,9 +420,33 @@ static int sof_ipc4_trigger_pipelines(struct snd_soc_component *component, * If use_chain_dma attribute is set we proceed to chained DMA * trigger function that handles the rest for the substream. 
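 * For example (assuming the link counter restarts from zero on each
 * resume, which the position accumulation in the PAUSED branch below
 * implies): pausing at 48000 link frames and, after resuming, pausing
 * again at 24000 leaves stream_end_offset at 72000 frames, mirroring
 * how the firmware accounts the position across pause cycles.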
*/ - if (pipeline->use_chain_dma) - return sof_ipc4_chain_dma_trigger(sdev, spcm, substream->stream, - pipeline_list, state, cmd); + if (pipeline->use_chain_dma) { + struct sof_ipc4_timestamp_info *time_info; + + time_info = sof_ipc4_sps_to_time_info(&spcm->stream[substream->stream]); + + ret = sof_ipc4_chain_dma_trigger(sdev, spcm, substream->stream, + pipeline_list, state, cmd); + if (ret || !time_info) + return ret; + + if (state == SOF_IPC4_PIPE_PAUSED) { + /* + * Record the DAI position for delay reporting + * To handle multiple pause/resume/xrun we need to add + * the positions to simulate how the firmware behaves + */ + u64 pos = snd_sof_pcm_get_dai_frame_counter(sdev, component, + substream); + + time_info->stream_end_offset += pos; + } else if (state == SOF_IPC4_PIPE_RESET) { + /* Reset the end offset as the stream is stopped */ + time_info->stream_end_offset = 0; + } + + return 0; + } /* allocate memory for the pipeline data */ trigger_list = kzalloc(struct_size(trigger_list, pipeline_instance_ids, @@ -909,6 +944,35 @@ static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component, return 0; } +static u64 sof_ipc4_frames_dai_to_host(struct sof_ipc4_timestamp_info *time_info, u64 value) +{ + u64 dai_rate, host_rate; + + if (!time_info->dai_copier || !time_info->host_copier) + return value; + + /* + * copiers do not change sampling rate, so we can use the + * out_format independently of stream direction + */ + dai_rate = time_info->dai_copier->data.out_format.sampling_frequency; + host_rate = time_info->host_copier->data.out_format.sampling_frequency; + + if (!dai_rate || !host_rate || dai_rate == host_rate) + return value; + + /* take care not to overflow u64, rates can be up to 768000 */ + if (value > U32_MAX) { + value = div64_u64(value, dai_rate); + value *= host_rate; + } else { + value *= host_rate; + value = div64_u64(value, dai_rate); + } + + return value; +} + static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream, struct snd_sof_pcm_stream *sps, @@ -924,8 +988,30 @@ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev, if (!host_copier || !dai_copier) return -EINVAL; - if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_INVALID_NODE_ID) + if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_INVALID_NODE_ID) { return -EINVAL; + } else if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_CHAIN_DMA_NODE_ID) { + /* + * While the firmware does not support time_info reporting for + * streams using ChainDMA, it is granted that ChainDMA can only + * be used on Host+Link pairs where the link position is + * accessible from the host side. + * + * Enable delay calculation in case of ChainDMA via host + * accessible registers. + * + * The ChainDMA prefills the link DMA with a preamble + * of zero samples. Set the stream start offset based + * on size of the preamble (driver provided fifo size + * multiplied by 2.5). We add 1ms of margin as the FW + * will align the buffer size to DMA hardware + * alignment that is not known to host. 
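+ * For example, with the current SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS of 2
+ * this evaluates to pre_ms = 2 * 5 / 2 + 1 = 6, so a 48 kHz stream
+ * gets a stream_start_offset of 6 * 48000 / 1000 = 288 frames.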
+ */ + int pre_ms = SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS * 5 / 2 + 1; + + time_info->stream_start_offset = pre_ms * substream->runtime->rate / MSEC_PER_SEC; + goto out; + } node_index = SOF_IPC4_NODE_INDEX(host_copier->data.gtw_cfg.node_id); offset = offsetof(struct sof_ipc4_fw_registers, pipeline_regs) + node_index * sizeof(ppl_reg); @@ -943,13 +1029,13 @@ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev, time_info->stream_end_offset = ppl_reg.stream_end_offset; do_div(time_info->stream_end_offset, dai_sample_size); - /* - * Calculate the wrap boundary need to be used for delay calculation - * The host counter is in bytes, it will wrap earlier than the frames - * based link counter. - */ - time_info->boundary = div64_u64(~((u64)0), - frames_to_bytes(substream->runtime, 1)); + /* convert to host frame time */ + time_info->stream_start_offset = + sof_ipc4_frames_dai_to_host(time_info, time_info->stream_start_offset); + time_info->stream_end_offset = + sof_ipc4_frames_dai_to_host(time_info, time_info->stream_end_offset); + +out: /* Initialize the delay value to 0 (no delay) */ time_info->delay = 0; @@ -992,6 +1078,8 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component, /* For delay calculation we need the host counter */ host_cnt = snd_sof_pcm_get_host_byte_counter(sdev, component, substream); + + /* Store the original value to host_ptr */ host_ptr = host_cnt; /* convert the host_cnt to frames */ @@ -1010,6 +1098,8 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component, sof_mailbox_read(sdev, time_info->llp_offset, &llp, sizeof(llp)); dai_cnt = ((u64)llp.reading.llp_u << 32) | llp.reading.llp_l; } + + dai_cnt = sof_ipc4_frames_dai_to_host(time_info, dai_cnt); dai_cnt += time_info->stream_end_offset; /* In two cases dai dma counter is not accurate @@ -1043,8 +1133,9 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component, dai_cnt -= time_info->stream_start_offset; } - /* Wrap the dai counter at the boundary where the host counter wraps */ - div64_u64_rem(dai_cnt, time_info->boundary, &dai_cnt); + /* Convert to a common base before comparisons */ + dai_cnt &= DELAY_BOUNDARY; + host_cnt &= DELAY_BOUNDARY; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { head_cnt = host_cnt; @@ -1054,14 +1145,11 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component, tail_cnt = host_cnt; } - if (head_cnt < tail_cnt) { - time_info->delay = time_info->boundary - tail_cnt + head_cnt; - goto out; - } - - time_info->delay = head_cnt - tail_cnt; + if (unlikely(head_cnt < tail_cnt)) + time_info->delay = DELAY_BOUNDARY - tail_cnt + head_cnt; + else + time_info->delay = head_cnt - tail_cnt; -out: /* * Convert the host byte counter to PCM pointer which wraps in buffer * and it is in frames diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c index f82db7f2a6b7e..4849a3ce02dca 100644 --- a/sound/soc/sof/ipc4-topology.c +++ b/sound/soc/sof/ipc4-topology.c @@ -32,7 +32,6 @@ MODULE_PARM_DESC(ipc4_ignore_cpc, #define SOF_IPC4_GAIN_PARAM_ID 0 #define SOF_IPC4_TPLG_ABI_SIZE 6 -#define SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS 2 static DEFINE_IDA(alh_group_ida); static DEFINE_IDA(pipeline_ida); @@ -519,8 +518,13 @@ static int sof_ipc4_widget_setup_pcm(struct snd_sof_widget *swidget) swidget->tuples, swidget->num_tuples, sizeof(u32), 1); /* Set default DMA buffer size if it is not specified in topology */ - if (!sps->dsp_max_burst_size_in_ms) - sps->dsp_max_burst_size_in_ms = SOF_IPC4_MIN_DMA_BUFFER_SIZE; + if 
(!sps->dsp_max_burst_size_in_ms) { + struct snd_sof_widget *pipe_widget = swidget->spipe->pipe_widget; + struct sof_ipc4_pipeline *pipeline = pipe_widget->private; + + sps->dsp_max_burst_size_in_ms = pipeline->use_chain_dma ? + SOF_IPC4_CHAIN_DMA_BUFFER_SIZE : SOF_IPC4_MIN_DMA_BUFFER_SIZE; + } } else { /* Capture data is copied from DSP to host in 1ms bursts */ spcm->stream[dir].dsp_max_burst_size_in_ms = 1; @@ -1777,10 +1781,10 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget, pipeline->msg.extension |= SOF_IPC4_GLB_EXT_CHAIN_DMA_FIFO_SIZE(fifo_size); /* - * Chain DMA does not support stream timestamping, set node_id to invalid - * to skip the code in sof_ipc4_get_stream_start_offset(). + * Chain DMA does not support stream timestamping, but it + * can use the host side registers for delay calculation. */ - copier_data->gtw_cfg.node_id = SOF_IPC4_INVALID_NODE_ID; + copier_data->gtw_cfg.node_id = SOF_IPC4_CHAIN_DMA_NODE_ID; return 0; } diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h index f4dc499c0ffe5..da9592430b152 100644 --- a/sound/soc/sof/ipc4-topology.h +++ b/sound/soc/sof/ipc4-topology.h @@ -58,10 +58,14 @@ #define SOF_IPC4_DMA_DEVICE_MAX_COUNT 16 +#define SOF_IPC4_CHAIN_DMA_NODE_ID 0x7fffffff #define SOF_IPC4_INVALID_NODE_ID 0xffffffff -/* FW requires minimum 2ms DMA buffer size */ -#define SOF_IPC4_MIN_DMA_BUFFER_SIZE 2 +/* FW requires minimum 4ms DMA buffer size */ +#define SOF_IPC4_MIN_DMA_BUFFER_SIZE 4 + +/* ChainDMA in fw uses 5ms DMA buffer */ +#define SOF_IPC4_CHAIN_DMA_BUFFER_SIZE 5 /* * The base of multi-gateways. Multi-gateways addressing starts from @@ -246,6 +250,8 @@ struct sof_ipc4_dma_stream_ch_map { #define SOF_IPC4_DMA_METHOD_HDA 1 #define SOF_IPC4_DMA_METHOD_GPDMA 2 /* defined for consistency but not used */ +#define SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS 2 + /** * struct sof_ipc4_dma_config: DMA configuration * @dma_method: HDAudio or GPDMA diff --git a/sound/usb/card.c b/sound/usb/card.c index 9c411b82a218d..d0a42859208aa 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -760,10 +760,16 @@ get_alias_quirk(struct usb_device *dev, unsigned int id) */ static int try_to_register_card(struct snd_usb_audio *chip, int ifnum) { + struct usb_interface *iface; + if (check_delayed_register_option(chip) == ifnum || - chip->last_iface == ifnum || - usb_interface_claimed(usb_ifnum_to_if(chip->dev, chip->last_iface))) + chip->last_iface == ifnum) + return snd_card_register(chip->card); + + iface = usb_ifnum_to_if(chip->dev, chip->last_iface); + if (iface && usb_interface_claimed(iface)) return snd_card_register(chip->card); + return 0; } diff --git a/sound/usb/midi.c b/sound/usb/midi.c index a792ada18863a..461e183680daa 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1522,12 +1522,12 @@ static void snd_usbmidi_free(struct snd_usb_midi *umidi) { int i; + if (!umidi->disconnected) + snd_usbmidi_disconnect(&umidi->list); + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; - if (ep->out) - snd_usbmidi_out_endpoint_delete(ep->out); - if (ep->in) - snd_usbmidi_in_endpoint_delete(ep->in); + kfree(ep->out); } mutex_destroy(&umidi->mutex); kfree(umidi); @@ -1553,7 +1553,7 @@ void snd_usbmidi_disconnect(struct list_head *p) spin_unlock_irq(&umidi->disc_lock); up_write(&umidi->disc_rwsem); - del_timer_sync(&umidi->error_timer); + timer_shutdown_sync(&umidi->error_timer); for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; diff --git 
a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 7bd87193c6177..b663764644cd8 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -17,6 +17,7 @@ #include #include #include +#include <linux/input.h> #include #include #include @@ -54,13 +55,13 @@ struct std_mono_table { * version, we keep it mono for simplicity. */ static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer, - unsigned int unitid, - unsigned int control, - unsigned int cmask, - int val_type, - unsigned int idx_off, - const char *name, - snd_kcontrol_tlv_rw_t *tlv_callback) + unsigned int unitid, + unsigned int control, + unsigned int cmask, + int val_type, + unsigned int idx_off, + const char *name, + snd_kcontrol_tlv_rw_t *tlv_callback) { struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; @@ -77,7 +78,8 @@ static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer, cval->idx_off = idx_off; /* get_min_max() is called only for integer volumes later, - * so provide a short-cut for booleans */ + * so provide a short-cut for booleans + */ cval->min = 0; cval->max = 1; cval->res = 0; @@ -107,15 +109,16 @@ static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer, } static int snd_create_std_mono_ctl(struct usb_mixer_interface *mixer, - unsigned int unitid, - unsigned int control, - unsigned int cmask, - int val_type, - const char *name, - snd_kcontrol_tlv_rw_t *tlv_callback) + unsigned int unitid, + unsigned int control, + unsigned int cmask, + int val_type, + const char *name, + snd_kcontrol_tlv_rw_t *tlv_callback) { return snd_create_std_mono_ctl_offset(mixer, unitid, control, cmask, - val_type, 0 /* Offset */, name, tlv_callback); + val_type, 0 /* Offset */, + name, tlv_callback); } /* @@ -126,9 +129,10 @@ static int snd_create_std_mono_table(struct usb_mixer_interface *mixer, { int err; - while (t->name != NULL) { + while (t->name) { err = snd_create_std_mono_ctl(mixer, t->unitid, t->control, - t->cmask, t->val_type, t->name, t->tlv_callback); + t->cmask, t->val_type, t->name, + t->tlv_callback); if (err < 0) return err; t++; @@ -208,12 +212,11 @@ static void snd_usb_soundblaster_remote_complete(struct urb *urb) if (code == rc->mute_code) snd_usb_mixer_notify_id(mixer, rc->mute_mixer_id); mixer->rc_code = code; - wmb(); wake_up(&mixer->rc_waitq); } static long snd_usb_sbrc_hwdep_read(struct snd_hwdep *hw, char __user *buf, - long count, loff_t *offset) + long count, loff_t *offset) { struct usb_mixer_interface *mixer = hw->private_data; int err; @@ -233,7 +236,7 @@ static long snd_usb_sbrc_hwdep_read(struct snd_hwdep *hw, char __user *buf, } static __poll_t snd_usb_sbrc_hwdep_poll(struct snd_hwdep *hw, struct file *file, - poll_table *wait) + poll_table *wait) { struct usb_mixer_interface *mixer = hw->private_data; @@ -309,20 +312,20 @@ static int snd_audigy2nx_led_update(struct usb_mixer_interface *mixer, if (chip->usb_id == USB_ID(0x041e, 0x3042)) err = snd_usb_ctl_msg(chip->dev, - usb_sndctrlpipe(chip->dev, 0), 0x24, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, - !value, 0, NULL, 0); + usb_sndctrlpipe(chip->dev, 0), 0x24, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, + !value, 0, NULL, 0); /* USB X-Fi S51 Pro */ if (chip->usb_id == USB_ID(0x041e, 0x30df)) err = snd_usb_ctl_msg(chip->dev, - usb_sndctrlpipe(chip->dev, 0), 0x24, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, - !value, 0, NULL, 0); + usb_sndctrlpipe(chip->dev, 0), 0x24, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, + !value, 0, NULL, 0); else err = 
snd_usb_ctl_msg(chip->dev, - usb_sndctrlpipe(chip->dev, 0), 0x24, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, - value, index + 2, NULL, 0); + usb_sndctrlpipe(chip->dev, 0), 0x24, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, + value, index + 2, NULL, 0); snd_usb_unlock_shutdown(chip); return err; } @@ -376,10 +379,10 @@ static int snd_audigy2nx_controls_create(struct usb_mixer_interface *mixer) struct snd_kcontrol_new knew; /* USB X-Fi S51 doesn't have a CMSS LED */ - if ((mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) && i == 0) + if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042) && i == 0) continue; /* USB X-Fi S51 Pro doesn't have one either */ - if ((mixer->chip->usb_id == USB_ID(0x041e, 0x30df)) && i == 0) + if (mixer->chip->usb_id == USB_ID(0x041e, 0x30df) && i == 0) continue; if (i > 1 && /* Live24ext has 2 LEDs only */ (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) || @@ -480,9 +483,9 @@ static int snd_emu0204_ch_switch_update(struct usb_mixer_interface *mixer, buf[0] = 0x01; buf[1] = value ? 0x02 : 0x01; err = snd_usb_ctl_msg(chip->dev, - usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR, - USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, - 0x0400, 0x0e00, buf, 2); + usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR, + USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, + 0x0400, 0x0e00, buf, 2); snd_usb_unlock_shutdown(chip); return err; } @@ -528,6 +531,265 @@ static int snd_emu0204_controls_create(struct usb_mixer_interface *mixer) &snd_emu0204_control, NULL); } +#if IS_REACHABLE(CONFIG_INPUT) +/* + * Sony DualSense controller (PS5) jack detection + * + * Since this is a UAC 1 device, it doesn't support jack detection. + * However, the controller hid-playstation driver reports HP & MIC + * insert events through a dedicated input device. 
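+ *
+ * The quirk below bridges those events back into ALSA: it registers an
+ * input_handler matched to the controller's EV_SW capabilities, and on
+ * each SW_HEADPHONE_INSERT / SW_MICROPHONE_INSERT event it updates a
+ * read-only boolean jack kcontrol and signals the change with
+ * snd_ctl_notify(), so user space observes ordinary jack state changes.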
+ */ + +#define SND_DUALSENSE_JACK_OUT_TERM_ID 3 +#define SND_DUALSENSE_JACK_IN_TERM_ID 4 + +struct dualsense_mixer_elem_info { + struct usb_mixer_elem_info info; + struct input_handler ih; + struct input_device_id id_table[2]; + bool connected; +}; + +static void snd_dualsense_ih_event(struct input_handle *handle, + unsigned int type, unsigned int code, + int value) +{ + struct dualsense_mixer_elem_info *mei; + struct usb_mixer_elem_list *me; + + if (type != EV_SW) + return; + + mei = container_of(handle->handler, struct dualsense_mixer_elem_info, ih); + me = &mei->info.head; + + if ((me->id == SND_DUALSENSE_JACK_OUT_TERM_ID && code == SW_HEADPHONE_INSERT) || + (me->id == SND_DUALSENSE_JACK_IN_TERM_ID && code == SW_MICROPHONE_INSERT)) { + mei->connected = !!value; + snd_ctl_notify(me->mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, + &me->kctl->id); + } +} + +static bool snd_dualsense_ih_match(struct input_handler *handler, + struct input_dev *dev) +{ + struct dualsense_mixer_elem_info *mei; + struct usb_device *snd_dev; + char *input_dev_path, *usb_dev_path; + size_t usb_dev_path_len; + bool match = false; + + mei = container_of(handler, struct dualsense_mixer_elem_info, ih); + snd_dev = mei->info.head.mixer->chip->dev; + + input_dev_path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); + if (!input_dev_path) { + dev_warn(&snd_dev->dev, "Failed to get input dev path\n"); + return false; + } + + usb_dev_path = kobject_get_path(&snd_dev->dev.kobj, GFP_KERNEL); + if (!usb_dev_path) { + dev_warn(&snd_dev->dev, "Failed to get USB dev path\n"); + goto free_paths; + } + + /* + * Ensure the VID:PID matched input device supposedly owned by the + * hid-playstation driver belongs to the actual hardware handled by + * the current USB audio device, which implies input_dev_path being + * a subpath of usb_dev_path. + * + * This verification is necessary when there is more than one identical + * controller attached to the host system. 
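+ *
+ * As an illustration (hypothetical sysfs paths): an input device at
+ * "/devices/pci0000:00/.../usb1/1-2/1-2:1.3/0003:054C:0CE6.0001/input/input7"
+ * matches a USB audio device at "/devices/pci0000:00/.../usb1/1-2",
+ * because the USB path plus a trailing '/' is a prefix of the input
+ * path, while a second identical controller on port 1-4 is rejected.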
+ */ + usb_dev_path_len = strlen(usb_dev_path); + if (usb_dev_path_len >= strlen(input_dev_path)) + goto free_paths; + + usb_dev_path[usb_dev_path_len] = '/'; + match = !memcmp(input_dev_path, usb_dev_path, usb_dev_path_len + 1); + +free_paths: + kfree(input_dev_path); + kfree(usb_dev_path); + + return match; +} + +static int snd_dualsense_ih_connect(struct input_handler *handler, + struct input_dev *dev, + const struct input_device_id *id) +{ + struct input_handle *handle; + int err; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = handler->name; + + err = input_register_handle(handle); + if (err) + goto err_free; + + err = input_open_device(handle); + if (err) + goto err_unregister; + + return 0; + +err_unregister: + input_unregister_handle(handle); +err_free: + kfree(handle); + return err; +} + +static void snd_dualsense_ih_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static void snd_dualsense_ih_start(struct input_handle *handle) +{ + struct dualsense_mixer_elem_info *mei; + struct usb_mixer_elem_list *me; + int status = -1; + + mei = container_of(handle->handler, struct dualsense_mixer_elem_info, ih); + me = &mei->info.head; + + if (me->id == SND_DUALSENSE_JACK_OUT_TERM_ID && + test_bit(SW_HEADPHONE_INSERT, handle->dev->swbit)) + status = test_bit(SW_HEADPHONE_INSERT, handle->dev->sw); + else if (me->id == SND_DUALSENSE_JACK_IN_TERM_ID && + test_bit(SW_MICROPHONE_INSERT, handle->dev->swbit)) + status = test_bit(SW_MICROPHONE_INSERT, handle->dev->sw); + + if (status >= 0) { + mei->connected = !!status; + snd_ctl_notify(me->mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, + &me->kctl->id); + } +} + +static int snd_dualsense_jack_get(struct snd_kcontrol *kctl, + struct snd_ctl_elem_value *ucontrol) +{ + struct dualsense_mixer_elem_info *mei = snd_kcontrol_chip(kctl); + + ucontrol->value.integer.value[0] = mei->connected; + + return 0; +} + +static const struct snd_kcontrol_new snd_dualsense_jack_control = { + .iface = SNDRV_CTL_ELEM_IFACE_CARD, + .access = SNDRV_CTL_ELEM_ACCESS_READ, + .info = snd_ctl_boolean_mono_info, + .get = snd_dualsense_jack_get, +}; + +static int snd_dualsense_resume_jack(struct usb_mixer_elem_list *list) +{ + snd_ctl_notify(list->mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, + &list->kctl->id); + return 0; +} + +static void snd_dualsense_mixer_elem_free(struct snd_kcontrol *kctl) +{ + struct dualsense_mixer_elem_info *mei = snd_kcontrol_chip(kctl); + + if (mei->ih.event) + input_unregister_handler(&mei->ih); + + snd_usb_mixer_elem_free(kctl); +} + +static int snd_dualsense_jack_create(struct usb_mixer_interface *mixer, + const char *name, bool is_output) +{ + struct dualsense_mixer_elem_info *mei; + struct input_device_id *idev_id; + struct snd_kcontrol *kctl; + int err; + + mei = kzalloc(sizeof(*mei), GFP_KERNEL); + if (!mei) + return -ENOMEM; + + snd_usb_mixer_elem_init_std(&mei->info.head, mixer, + is_output ? 
SND_DUALSENSE_JACK_OUT_TERM_ID : + SND_DUALSENSE_JACK_IN_TERM_ID); + + mei->info.head.resume = snd_dualsense_resume_jack; + mei->info.val_type = USB_MIXER_BOOLEAN; + mei->info.channels = 1; + mei->info.min = 0; + mei->info.max = 1; + + kctl = snd_ctl_new1(&snd_dualsense_jack_control, mei); + if (!kctl) { + kfree(mei); + return -ENOMEM; + } + + strscpy(kctl->id.name, name, sizeof(kctl->id.name)); + kctl->private_free = snd_dualsense_mixer_elem_free; + + err = snd_usb_mixer_add_control(&mei->info.head, kctl); + if (err) + return err; + + idev_id = &mei->id_table[0]; + idev_id->flags = INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT | + INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT; + idev_id->vendor = USB_ID_VENDOR(mixer->chip->usb_id); + idev_id->product = USB_ID_PRODUCT(mixer->chip->usb_id); + idev_id->evbit[BIT_WORD(EV_SW)] = BIT_MASK(EV_SW); + if (is_output) + idev_id->swbit[BIT_WORD(SW_HEADPHONE_INSERT)] = BIT_MASK(SW_HEADPHONE_INSERT); + else + idev_id->swbit[BIT_WORD(SW_MICROPHONE_INSERT)] = BIT_MASK(SW_MICROPHONE_INSERT); + + mei->ih.event = snd_dualsense_ih_event; + mei->ih.match = snd_dualsense_ih_match; + mei->ih.connect = snd_dualsense_ih_connect; + mei->ih.disconnect = snd_dualsense_ih_disconnect; + mei->ih.start = snd_dualsense_ih_start; + mei->ih.name = name; + mei->ih.id_table = mei->id_table; + + err = input_register_handler(&mei->ih); + if (err) { + dev_warn(&mixer->chip->dev->dev, + "Could not register input handler: %d\n", err); + mei->ih.event = NULL; + } + + return 0; +} + +static int snd_dualsense_controls_create(struct usb_mixer_interface *mixer) +{ + int err; + + err = snd_dualsense_jack_create(mixer, "Headphone Jack", true); + if (err < 0) + return err; + + return snd_dualsense_jack_create(mixer, "Headset Mic Jack", false); +} +#endif /* IS_REACHABLE(CONFIG_INPUT) */ + /* ASUS Xonar U1 / U3 controls */ static int snd_xonar_u1_switch_get(struct snd_kcontrol *kcontrol, @@ -1020,7 +1282,7 @@ static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer, /* M-Audio FastTrack Ultra quirks */ /* FTU Effect switch (also used by C400/C600) */ static int snd_ftu_eff_switch_info(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) + struct snd_ctl_elem_info *uinfo) { static const char *const texts[8] = { "Room 1", "Room 2", "Room 3", "Hall 1", @@ -1054,7 +1316,7 @@ static int snd_ftu_eff_switch_init(struct usb_mixer_interface *mixer, } static int snd_ftu_eff_switch_get(struct snd_kcontrol *kctl, - struct snd_ctl_elem_value *ucontrol) + struct snd_ctl_elem_value *ucontrol) { ucontrol->value.enumerated.item[0] = kctl->private_value >> 24; return 0; @@ -1085,7 +1347,7 @@ static int snd_ftu_eff_switch_update(struct usb_mixer_elem_list *list) } static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl, - struct snd_ctl_elem_value *ucontrol) + struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_list *list = snd_kcontrol_chip(kctl); unsigned int pval = list->kctl->private_value; @@ -1103,7 +1365,7 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl, } static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer, - int validx, int bUnitID) + int validx, int bUnitID) { static struct snd_kcontrol_new template = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, @@ -1142,22 +1404,22 @@ static int snd_ftu_create_volume_ctls(struct usb_mixer_interface *mixer) for (in = 0; in < 8; in++) { cmask = BIT(in); snprintf(name, sizeof(name), - "AIn%d - Out%d Capture Volume", - in + 1, out + 1); + "AIn%d - Out%d Capture 
Volume", + in + 1, out + 1); err = snd_create_std_mono_ctl(mixer, id, control, - cmask, val_type, name, - &snd_usb_mixer_vol_tlv); + cmask, val_type, name, + &snd_usb_mixer_vol_tlv); if (err < 0) return err; } for (in = 8; in < 16; in++) { cmask = BIT(in); snprintf(name, sizeof(name), - "DIn%d - Out%d Playback Volume", - in - 7, out + 1); + "DIn%d - Out%d Playback Volume", + in - 7, out + 1); err = snd_create_std_mono_ctl(mixer, id, control, - cmask, val_type, name, - &snd_usb_mixer_vol_tlv); + cmask, val_type, name, + &snd_usb_mixer_vol_tlv); if (err < 0) return err; } @@ -1218,10 +1480,10 @@ static int snd_ftu_create_effect_return_ctls(struct usb_mixer_interface *mixer) for (ch = 0; ch < 4; ++ch) { cmask = BIT(ch); snprintf(name, sizeof(name), - "Effect Return %d Volume", ch + 1); + "Effect Return %d Volume", ch + 1); err = snd_create_std_mono_ctl(mixer, id, control, - cmask, val_type, name, - snd_usb_mixer_vol_tlv); + cmask, val_type, name, + snd_usb_mixer_vol_tlv); if (err < 0) return err; } @@ -1242,20 +1504,20 @@ static int snd_ftu_create_effect_send_ctls(struct usb_mixer_interface *mixer) for (ch = 0; ch < 8; ++ch) { cmask = BIT(ch); snprintf(name, sizeof(name), - "Effect Send AIn%d Volume", ch + 1); + "Effect Send AIn%d Volume", ch + 1); err = snd_create_std_mono_ctl(mixer, id, control, cmask, - val_type, name, - snd_usb_mixer_vol_tlv); + val_type, name, + snd_usb_mixer_vol_tlv); if (err < 0) return err; } for (ch = 8; ch < 16; ++ch) { cmask = BIT(ch); snprintf(name, sizeof(name), - "Effect Send DIn%d Volume", ch - 7); + "Effect Send DIn%d Volume", ch - 7); err = snd_create_std_mono_ctl(mixer, id, control, cmask, - val_type, name, - snd_usb_mixer_vol_tlv); + val_type, name, + snd_usb_mixer_vol_tlv); if (err < 0) return err; } @@ -1345,19 +1607,19 @@ static int snd_c400_create_vol_ctls(struct usb_mixer_interface *mixer) for (out = 0; out < num_outs; out++) { if (chan < num_outs) { snprintf(name, sizeof(name), - "PCM%d-Out%d Playback Volume", - chan + 1, out + 1); + "PCM%d-Out%d Playback Volume", + chan + 1, out + 1); } else { snprintf(name, sizeof(name), - "In%d-Out%d Playback Volume", - chan - num_outs + 1, out + 1); + "In%d-Out%d Playback Volume", + chan - num_outs + 1, out + 1); } cmask = (out == 0) ? 
0 : BIT(out - 1); offset = chan * num_outs; err = snd_create_std_mono_ctl_offset(mixer, id, control, - cmask, val_type, offset, name, - &snd_usb_mixer_vol_tlv); + cmask, val_type, offset, name, + &snd_usb_mixer_vol_tlv); if (err < 0) return err; } @@ -1376,7 +1638,7 @@ static int snd_c400_create_effect_volume_ctl(struct usb_mixer_interface *mixer) const unsigned int cmask = 0; return snd_create_std_mono_ctl(mixer, id, control, cmask, val_type, - name, snd_usb_mixer_vol_tlv); + name, snd_usb_mixer_vol_tlv); } /* This control needs a volume quirk, see mixer.c */ @@ -1389,7 +1651,7 @@ static int snd_c400_create_effect_duration_ctl(struct usb_mixer_interface *mixer const unsigned int cmask = 0; return snd_create_std_mono_ctl(mixer, id, control, cmask, val_type, - name, snd_usb_mixer_vol_tlv); + name, snd_usb_mixer_vol_tlv); } /* This control needs a volume quirk, see mixer.c */ @@ -1402,7 +1664,7 @@ static int snd_c400_create_effect_feedback_ctl(struct usb_mixer_interface *mixer const unsigned int cmask = 0; return snd_create_std_mono_ctl(mixer, id, control, cmask, val_type, - name, NULL); + name, NULL); } static int snd_c400_create_effect_vol_ctls(struct usb_mixer_interface *mixer) @@ -1431,18 +1693,18 @@ static int snd_c400_create_effect_vol_ctls(struct usb_mixer_interface *mixer) for (chan = 0; chan < num_outs + num_ins; chan++) { if (chan < num_outs) { snprintf(name, sizeof(name), - "Effect Send DOut%d", - chan + 1); + "Effect Send DOut%d", + chan + 1); } else { snprintf(name, sizeof(name), - "Effect Send AIn%d", - chan - num_outs + 1); + "Effect Send AIn%d", + chan - num_outs + 1); } cmask = (chan == 0) ? 0 : BIT(chan - 1); err = snd_create_std_mono_ctl(mixer, id, control, - cmask, val_type, name, - &snd_usb_mixer_vol_tlv); + cmask, val_type, name, + &snd_usb_mixer_vol_tlv); if (err < 0) return err; } @@ -1477,14 +1739,14 @@ static int snd_c400_create_effect_ret_vol_ctls(struct usb_mixer_interface *mixer for (chan = 0; chan < num_outs; chan++) { snprintf(name, sizeof(name), - "Effect Return %d", - chan + 1); + "Effect Return %d", + chan + 1); cmask = (chan == 0) ? 
0 : BIT(chan + (chan % 2) * num_outs - 1); err = snd_create_std_mono_ctl_offset(mixer, id, control, - cmask, val_type, offset, name, - &snd_usb_mixer_vol_tlv); + cmask, val_type, offset, name, + &snd_usb_mixer_vol_tlv); if (err < 0) return err; } @@ -1625,7 +1887,7 @@ static const struct std_mono_table ebox44_table[] = { * */ static int snd_microii_spdif_info(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) + struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; @@ -1633,7 +1895,7 @@ static int snd_microii_spdif_info(struct snd_kcontrol *kcontrol, } static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) + struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol); struct snd_usb_audio *chip = list->mixer->chip; @@ -1666,13 +1928,13 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol, ep = get_endpoint(alts, 0)->bEndpointAddress; err = snd_usb_ctl_msg(chip->dev, - usb_rcvctrlpipe(chip->dev, 0), - UAC_GET_CUR, - USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN, - UAC_EP_CS_ATTR_SAMPLE_RATE << 8, - ep, - data, - sizeof(data)); + usb_rcvctrlpipe(chip->dev, 0), + UAC_GET_CUR, + USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN, + UAC_EP_CS_ATTR_SAMPLE_RATE << 8, + ep, + data, + sizeof(data)); if (err < 0) goto end; @@ -1699,26 +1961,26 @@ static int snd_microii_spdif_default_update(struct usb_mixer_elem_list *list) reg = ((pval >> 4) & 0xf0) | (pval & 0x0f); err = snd_usb_ctl_msg(chip->dev, - usb_sndctrlpipe(chip->dev, 0), - UAC_SET_CUR, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, - reg, - 2, - NULL, - 0); + usb_sndctrlpipe(chip->dev, 0), + UAC_SET_CUR, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, + reg, + 2, + NULL, + 0); if (err < 0) goto end; reg = (pval & IEC958_AES0_NONAUDIO) ? 
0xa0 : 0x20; reg |= (pval >> 12) & 0x0f; err = snd_usb_ctl_msg(chip->dev, - usb_sndctrlpipe(chip->dev, 0), - UAC_SET_CUR, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, - reg, - 3, - NULL, - 0); + usb_sndctrlpipe(chip->dev, 0), + UAC_SET_CUR, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, + reg, + 3, + NULL, + 0); if (err < 0) goto end; @@ -1728,13 +1990,14 @@ static int snd_microii_spdif_default_update(struct usb_mixer_elem_list *list) } static int snd_microii_spdif_default_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) + struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol); unsigned int pval, pval_old; int err; - pval = pval_old = kcontrol->private_value; + pval = kcontrol->private_value; + pval_old = pval; pval &= 0xfffff0f0; pval |= (ucontrol->value.iec958.status[1] & 0x0f) << 8; pval |= (ucontrol->value.iec958.status[0] & 0x0f); @@ -1755,7 +2018,7 @@ static int snd_microii_spdif_default_put(struct snd_kcontrol *kcontrol, } static int snd_microii_spdif_mask_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) + struct snd_ctl_elem_value *ucontrol) { ucontrol->value.iec958.status[0] = 0x0f; ucontrol->value.iec958.status[1] = 0xff; @@ -1766,7 +2029,7 @@ static int snd_microii_spdif_mask_get(struct snd_kcontrol *kcontrol, } static int snd_microii_spdif_switch_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) + struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = !(kcontrol->private_value & 0x02); @@ -1784,20 +2047,20 @@ static int snd_microii_spdif_switch_update(struct usb_mixer_elem_list *list) return err; err = snd_usb_ctl_msg(chip->dev, - usb_sndctrlpipe(chip->dev, 0), - UAC_SET_CUR, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, - reg, - 9, - NULL, - 0); + usb_sndctrlpipe(chip->dev, 0), + UAC_SET_CUR, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, + reg, + 9, + NULL, + 0); snd_usb_unlock_shutdown(chip); return err; } static int snd_microii_spdif_switch_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) + struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol); u8 reg; @@ -1882,9 +2145,9 @@ static int snd_soundblaster_e1_switch_update(struct usb_mixer_interface *mixer, if (err < 0) return err; err = snd_usb_ctl_msg(chip->dev, - usb_sndctrlpipe(chip->dev, 0), HID_REQ_SET_REPORT, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, - 0x0202, 3, buff, 2); + usb_sndctrlpipe(chip->dev, 0), HID_REQ_SET_REPORT, + USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, + 0x0202, 3, buff, 2); snd_usb_unlock_shutdown(chip); return err; } @@ -3234,7 +3497,7 @@ static int snd_rme_digiface_enum_put(struct snd_kcontrol *kcontrol, } static int snd_rme_digiface_current_sync_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) + struct snd_ctl_elem_value *ucontrol) { int ret = snd_rme_digiface_enum_get(kcontrol, ucontrol); @@ -3806,7 +4069,7 @@ static const struct snd_djm_device snd_djm_devices[] = { static int snd_djm_controls_info(struct snd_kcontrol *kctl, - struct snd_ctl_elem_info *info) + struct snd_ctl_elem_info *info) { unsigned long private_value = kctl->private_value; u8 device_idx = (private_value & SND_DJM_DEVICE_MASK) >> SND_DJM_DEVICE_SHIFT; @@ -3825,8 +4088,8 @@ static int snd_djm_controls_info(struct snd_kcontrol *kctl, info->value.enumerated.item = noptions - 1; name = snd_djm_get_label(device_idx, - ctl->options[info->value.enumerated.item], - 
ctl->wIndex); + ctl->options[info->value.enumerated.item], + ctl->wIndex); if (!name) return -EINVAL; @@ -3838,25 +4101,25 @@ static int snd_djm_controls_info(struct snd_kcontrol *kctl, } static int snd_djm_controls_update(struct usb_mixer_interface *mixer, - u8 device_idx, u8 group, u16 value) + u8 device_idx, u8 group, u16 value) { int err; const struct snd_djm_device *device = &snd_djm_devices[device_idx]; - if ((group >= device->ncontrols) || value >= device->controls[group].noptions) + if (group >= device->ncontrols || value >= device->controls[group].noptions) return -EINVAL; err = snd_usb_lock_shutdown(mixer->chip); if (err) return err; - err = snd_usb_ctl_msg( - mixer->chip->dev, usb_sndctrlpipe(mixer->chip->dev, 0), - USB_REQ_SET_FEATURE, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - device->controls[group].options[value], - device->controls[group].wIndex, - NULL, 0); + err = snd_usb_ctl_msg(mixer->chip->dev, + usb_sndctrlpipe(mixer->chip->dev, 0), + USB_REQ_SET_FEATURE, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + device->controls[group].options[value], + device->controls[group].wIndex, + NULL, 0); snd_usb_unlock_shutdown(mixer->chip); return err; @@ -3897,7 +4160,7 @@ static int snd_djm_controls_resume(struct usb_mixer_elem_list *list) } static int snd_djm_controls_create(struct usb_mixer_interface *mixer, - const u8 device_idx) + const u8 device_idx) { int err, i; u16 value; @@ -3916,10 +4179,10 @@ static int snd_djm_controls_create(struct usb_mixer_interface *mixer, for (i = 0; i < device->ncontrols; i++) { value = device->controls[i].default_value; knew.name = device->controls[i].name; - knew.private_value = ( + knew.private_value = ((unsigned long)device_idx << SND_DJM_DEVICE_SHIFT) | (i << SND_DJM_GROUP_SHIFT) | - value); + value; err = snd_djm_controls_update(mixer, device_idx, i, value); if (err) return err; @@ -3961,6 +4224,13 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) err = snd_emu0204_controls_create(mixer); break; +#if IS_REACHABLE(CONFIG_INPUT) + case USB_ID(0x054c, 0x0ce6): /* Sony DualSense controller (PS5) */ + case USB_ID(0x054c, 0x0df2): /* Sony DualSense Edge controller (PS5) */ + err = snd_dualsense_controls_create(mixer); + break; +#endif /* IS_REACHABLE(CONFIG_INPUT) */ + case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */ case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C400 */ err = snd_c400_create_mixer(mixer); @@ -3986,13 +4256,15 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) break; case USB_ID(0x17cc, 0x1011): /* Traktor Audio 6 */ - err = snd_nativeinstruments_create_mixer(mixer, + err = snd_nativeinstruments_create_mixer(/* checkpatch hack */ + mixer, snd_nativeinstruments_ta6_mixers, ARRAY_SIZE(snd_nativeinstruments_ta6_mixers)); break; case USB_ID(0x17cc, 0x1021): /* Traktor Audio 10 */ - err = snd_nativeinstruments_create_mixer(mixer, + err = snd_nativeinstruments_create_mixer(/* checkpatch hack */ + mixer, snd_nativeinstruments_ta10_mixers, ARRAY_SIZE(snd_nativeinstruments_ta10_mixers)); break; @@ -4127,7 +4399,8 @@ static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer, struct snd_kcontrol *kctl) { /* Approximation using 10 ranges based on output measurement on hw v1.2. - * This seems close to the cubic mapping e.g. alsamixer uses. */ + * This seems close to the cubic mapping e.g. alsamixer uses. 
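+ * (TLV range values are in 1/100 dB: the first range below maps raw
+ * values 0-1 to -53.00..-49.70 dB.)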
+ */ static const DECLARE_TLV_DB_RANGE(scale, 0, 1, TLV_DB_MINMAX_ITEM(-5300, -4970), 2, 5, TLV_DB_MINMAX_ITEM(-4710, -4160), @@ -4211,16 +4484,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, if (unitid == 7 && cval->control == UAC_FU_VOLUME) snd_dragonfly_quirk_db_scale(mixer, cval, kctl); break; + } + /* lowest playback value is muted on some devices */ - case USB_ID(0x0572, 0x1b09): /* Conexant Systems (Rockwell), Inc. */ - case USB_ID(0x0d8c, 0x000c): /* C-Media */ - case USB_ID(0x0d8c, 0x0014): /* C-Media */ - case USB_ID(0x19f7, 0x0003): /* RODE NT-USB */ - case USB_ID(0x2d99, 0x0026): /* HECATE G2 GAMING HEADSET */ + if (mixer->chip->quirk_flags & QUIRK_FLAG_MIXER_MIN_MUTE) if (strstr(kctl->id.name, "Playback")) cval->min_mute = 1; - break; - } /* ALSA-ify some Plantronics headset control names */ if (USB_ID_VENDOR(mixer->chip->usb_id) == 0x047f && diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 0da4ee9757c01..8a20508e055a3 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -2196,6 +2196,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_SET_IFACE_FIRST), DEVICE_FLG(0x0556, 0x0014, /* Phoenix Audio TMX320VC */ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x0572, 0x1b08, /* Conexant Systems (Rockwell), Inc. */ + QUIRK_FLAG_MIXER_MIN_MUTE), + DEVICE_FLG(0x0572, 0x1b09, /* Conexant Systems (Rockwell), Inc. */ + QUIRK_FLAG_MIXER_MIN_MUTE), DEVICE_FLG(0x05a3, 0x9420, /* ELP HD USB Camera */ QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x05a7, 0x1020, /* Bose Companion 5 */ @@ -2238,12 +2242,16 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */ QUIRK_FLAG_CTL_MSG_DELAY_1M), + DEVICE_FLG(0x0bda, 0x498a, /* Realtek Semiconductor Corp. */ + QUIRK_FLAG_MIXER_MIN_MUTE), DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */ QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x0c45, 0x636b, /* Microdia JP001 USB Camera */ QUIRK_FLAG_GET_SAMPLE_RATE), - DEVICE_FLG(0x0d8c, 0x0014, /* USB Audio Device */ - QUIRK_FLAG_CTL_MSG_DELAY_1M), + DEVICE_FLG(0x0d8c, 0x000c, /* C-Media */ + QUIRK_FLAG_MIXER_MIN_MUTE), + DEVICE_FLG(0x0d8c, 0x0014, /* C-Media */ + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIXER_MIN_MUTE), DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */ QUIRK_FLAG_FIXED_RATE), DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */ @@ -2252,6 +2260,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER), DEVICE_FLG(0x1101, 0x0003, /* Audioengine D1 */ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x12d1, 0x3a07, /* Huawei Technologies Co., Ltd. 
*/ + QUIRK_FLAG_MIXER_MIN_MUTE), DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */ QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16), DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */ @@ -2290,6 +2300,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY), DEVICE_FLG(0x1901, 0x0191, /* GE B850V3 CP2114 audio interface */ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x19f7, 0x0003, /* RODE NT-USB */ + QUIRK_FLAG_MIXER_MIN_MUTE), DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */ QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x1bcf, 0x2281, /* HD Webcam */ @@ -2340,6 +2352,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_IGNORE_CTL_ERROR), DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x2a70, 0x1881, /* OnePlus Technology (Shenzhen) Co., Ltd. BE02T */ + QUIRK_FLAG_MIXER_MIN_MUTE), DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */ QUIRK_FLAG_GENERIC_IMPLICIT_FB), DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */ @@ -2350,10 +2364,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x2d95, 0x8021, /* VIVO USB-C-XE710 HEADSET */ QUIRK_FLAG_CTL_MSG_DELAY_1M), + DEVICE_FLG(0x2d99, 0x0026, /* HECATE G2 GAMING HEADSET */ + QUIRK_FLAG_MIXER_MIN_MUTE), DEVICE_FLG(0x2fc6, 0xf0b7, /* iBasso DC07 Pro */ QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */ QUIRK_FLAG_IGNORE_CTL_ERROR), + DEVICE_FLG(0x339b, 0x3a07, /* Synaptics HONOR USB-C HEADSET */ + QUIRK_FLAG_MIXER_MIN_MUTE), DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */ QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x534d, 0x0021, /* MacroSilicon MS2100/MS2106 */ @@ -2405,6 +2423,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_DSD_RAW), VENDOR_FLG(0x2d87, /* Cayin device */ QUIRK_FLAG_DSD_RAW), + VENDOR_FLG(0x2fc6, /* Comture-inc devices */ + QUIRK_FLAG_DSD_RAW), VENDOR_FLG(0x3336, /* HEM devices */ QUIRK_FLAG_DSD_RAW), VENDOR_FLG(0x3353, /* Khadas devices */ diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 158ec053dc44d..1ef4d39978df3 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -196,6 +196,9 @@ extern bool snd_usb_skip_validation; * for the given endpoint. 
* QUIRK_FLAG_MIC_RES_16 and QUIRK_FLAG_MIC_RES_384 * Set the fixed resolution for Mic Capture Volume (mostly for webcams) + * QUIRK_FLAG_MIXER_MIN_MUTE + * Set minimum volume control value as mute for devices where the lowest + * playback value represents muted state instead of minimum audible volume */ #define QUIRK_FLAG_GET_SAMPLE_RATE (1U << 0) @@ -222,5 +225,6 @@ extern bool snd_usb_skip_validation; #define QUIRK_FLAG_FIXED_RATE (1U << 21) #define QUIRK_FLAG_MIC_RES_16 (1U << 22) #define QUIRK_FLAG_MIC_RES_384 (1U << 23) +#define QUIRK_FLAG_MIXER_MIN_MUTE (1U << 24) #endif /* __USBAUDIO_H */ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 1658596188bf8..592ca17b4b74a 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -327,10 +327,10 @@ $(OUTPUT)test-libcapstone.bin: $(BUILD) # -lcapstone provided by $(FEATURE_CHECK_LDFLAGS-libcapstone) $(OUTPUT)test-compile-32.bin: - $(CC) -m32 -o $@ test-compile.c + $(CC) -m32 -Wall -Werror -o $@ test-compile.c $(OUTPUT)test-compile-x32.bin: - $(CC) -mx32 -o $@ test-compile.c + $(CC) -mx32 -Wall -Werror -o $@ test-compile.c $(OUTPUT)test-zlib.bin: $(BUILD) -lz diff --git a/tools/include/nolibc/std.h b/tools/include/nolibc/std.h index a9d8b5b51f37f..f24953f8b949c 100644 --- a/tools/include/nolibc/std.h +++ b/tools/include/nolibc/std.h @@ -33,6 +33,6 @@ typedef unsigned long nlink_t; typedef signed long off_t; typedef signed long blksize_t; typedef signed long blkcnt_t; -typedef __kernel_old_time_t time_t; +typedef __kernel_time_t time_t; #endif /* _NOLIBC_STD_H */ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index e33cf3caf8b64..060aecf60b76b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -990,35 +990,33 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw, const struct btf_member *kern_data_member; struct btf *btf = NULL; __s32 kern_vtype_id, kern_type_id; - char tname[256]; + char tname[192], stname[256]; __u32 i; snprintf(tname, sizeof(tname), "%.*s", (int)bpf_core_essential_name_len(tname_raw), tname_raw); - kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT, - &btf, mod_btf); - if (kern_type_id < 0) { - pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", - tname); - return kern_type_id; - } - kern_type = btf__type_by_id(btf, kern_type_id); + snprintf(stname, sizeof(stname), "%s%s", STRUCT_OPS_VALUE_PREFIX, tname); - /* Find the corresponding "map_value" type that will be used - * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example, - * find "struct bpf_struct_ops_tcp_congestion_ops" from the - * btf_vmlinux. + /* Look for the corresponding "map_value" type that will be used + * in map_update(BPF_MAP_TYPE_STRUCT_OPS) first, figure out the btf + * and the mod_btf. + * For example, find "struct bpf_struct_ops_tcp_congestion_ops". 
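+ * With STRUCT_OPS_VALUE_PREFIX being "bpf_struct_ops_", a tname of
+ * "tcp_congestion_ops" produces the stname built above; resolving the
+ * value type first pins down the right btf/mod_btf, after which the
+ * plain struct is found by name in that same BTF.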
*/ - kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX, - tname, BTF_KIND_STRUCT); + kern_vtype_id = find_ksym_btf_id(obj, stname, BTF_KIND_STRUCT, &btf, mod_btf); if (kern_vtype_id < 0) { - pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n", - STRUCT_OPS_VALUE_PREFIX, tname); + pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", stname); return kern_vtype_id; } kern_vtype = btf__type_by_id(btf, kern_vtype_id); + kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT); + if (kern_type_id < 0) { + pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname); + return kern_type_id; + } + kern_type = btf__type_by_id(btf, kern_type_id); + /* Find "struct tcp_congestion_ops" from * struct bpf_struct_ops_tcp_congestion_ops { * [ ... ] @@ -1031,8 +1029,8 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw, break; } if (i == btf_vlen(kern_vtype)) { - pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n", - tname, STRUCT_OPS_VALUE_PREFIX, tname); + pr_warn("struct_ops init_kern: struct %s data is not found in struct %s\n", + tname, stname); return -EINVAL; } @@ -5056,6 +5054,16 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) return false; } + /* + * bpf_get_map_info_by_fd() for DEVMAP will always return flags with + * BPF_F_RDONLY_PROG set, but it generally is not set at map creation time. + * Thus, ignore the BPF_F_RDONLY_PROG flag in the flags returned from + * bpf_get_map_info_by_fd() when checking for compatibility with an + * existing DEVMAP. + */ + if (map->def.type == BPF_MAP_TYPE_DEVMAP || map->def.type == BPF_MAP_TYPE_DEVMAP_HASH) + map_info.map_flags &= ~BPF_F_RDONLY_PROG; + return (map_info.type == map->def.type && map_info.key_size == map->def.key_size && map_info.value_size == map->def.value_size && diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h index 37bb7771d9143..32b75c0326c93 100644 --- a/tools/lib/perf/include/perf/event.h +++ b/tools/lib/perf/include/perf/event.h @@ -291,6 +291,7 @@ struct perf_record_header_event_type { struct perf_record_header_tracing_data { struct perf_event_header header; __u32 size; + __u32 pad; }; #define PERF_RECORD_MISC_BUILD_ID_SIZE (1 << 15) diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c index 9ef569492560e..ddaeb4eb3e249 100644 --- a/tools/lib/subcmd/help.c +++ b/tools/lib/subcmd/help.c @@ -75,6 +75,9 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes) size_t ci, cj, ei; int cmp; + if (!excludes->cnt) + return; + ci = cj = ei = 0; while (ci < cmds->cnt && ei < excludes->cnt) { cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name); diff --git a/tools/objtool/check.c b/tools/objtool/check.c index d4d82bb9b5511..59ca5b0c093d8 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -209,6 +209,7 @@ static bool is_rust_noreturn(const struct symbol *func) * these come from the Rust standard library). 
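* (These names are v0-mangled path suffixes: core::option::expect_failed,
* for example, mangles to _RNvNtCs<hash>_4core6option13expect_failed, so
* matching on the suffix works regardless of the crate disambiguator.)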
*/ return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") || + str_ends_with(func->name, "_4core6option13expect_failed") || str_ends_with(func->name, "_4core6option13unwrap_failed") || str_ends_with(func->name, "_4core6result13unwrap_failed") || str_ends_with(func->name, "_4core9panicking5panic") || diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 628c61397d2d3..b578930ed76a4 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -639,8 +639,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) * (behavior changed with commit b0a873e). */ if (errno == EINVAL || errno == ENOSYS || - errno == ENOENT || errno == EOPNOTSUPP || - errno == ENXIO) { + errno == ENOENT || errno == ENXIO) { if (verbose > 0) ui__warning("%s event is not supported by the kernel.\n", evsel__name(counter)); @@ -658,7 +657,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) if (verbose > 0) ui__warning("%s\n", msg); return COUNTER_RETRY; - } else if (target__has_per_thread(&target) && + } else if (target__has_per_thread(&target) && errno != EOPNOTSUPP && evsel_list->core.threads && evsel_list->core.threads->err_thread != -1) { /* @@ -679,6 +678,19 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) return COUNTER_SKIP; } + if (errno == EOPNOTSUPP) { + if (verbose > 0) { + ui__warning("%s event is not supported by the kernel.\n", + evsel__name(counter)); + } + counter->supported = false; + counter->errored = true; + + if ((evsel__leader(counter) != counter) || + !(counter->core.leader->nr_members > 1)) + return COUNTER_SKIP; + } + evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); ui__error("%s\n", msg); diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json index 5228f94a793f9..6817cac149e0b 100644 --- a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json @@ -113,7 +113,7 @@ { "MetricName": "load_store_spec_rate", "MetricExpr": "LDST_SPEC / INST_SPEC", - "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speclatively executed", + "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speculatively executed", "MetricGroup": "Operation_Mix", "ScaleUnit": "100percent of operations" }, @@ -132,7 +132,7 @@ { "MetricName": "pc_write_spec_rate", "MetricExpr": "PC_WRITE_SPEC / INST_SPEC", - "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speclatively executed", + "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speculatively executed", "MetricGroup": "Operation_Mix", "ScaleUnit": "100percent of operations" }, @@ -195,14 +195,14 @@ { "MetricName": "stall_frontend_cache_rate", "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES", - "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss", + "BriefDescription": "Proportion of cycles stalled and no operations delivered from frontend and cache miss", "MetricGroup": "Stall", "ScaleUnit": "100percent of cycles" }, { "MetricName": "stall_frontend_tlb_rate", "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES", - "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss", + 
"BriefDescription": "Proportion of cycles stalled and no operations delivered from frontend and TLB miss", "MetricGroup": "Stall", "ScaleUnit": "100percent of cycles" }, @@ -391,7 +391,7 @@ "ScaleUnit": "100percent of cache acceses" }, { - "MetricName": "l1d_cache_access_prefetces", + "MetricName": "l1d_cache_access_prefetches", "MetricExpr": "L1D_CACHE_PRFM / L1D_CACHE", "BriefDescription": "L1D cache access - prefetch", "MetricGroup": "Cache", diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c index 1c4feec1adff1..6e7f053006b4f 100644 --- a/tools/perf/tests/perf-record.c +++ b/tools/perf/tests/perf-record.c @@ -115,6 +115,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest if (err < 0) { pr_debug("sched__get_first_possible_cpu: %s\n", str_error_r(errno, sbuf, sizeof(sbuf))); + evlist__cancel_workload(evlist); goto out_delete_evlist; } @@ -126,6 +127,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { pr_debug("sched_setaffinity: %s\n", str_error_r(errno, sbuf, sizeof(sbuf))); + evlist__cancel_workload(evlist); goto out_delete_evlist; } @@ -137,6 +139,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest if (err < 0) { pr_debug("perf_evlist__open: %s\n", str_error_r(errno, sbuf, sizeof(sbuf))); + evlist__cancel_workload(evlist); goto out_delete_evlist; } @@ -149,6 +152,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest if (err < 0) { pr_debug("evlist__mmap: %s\n", str_error_r(errno, sbuf, sizeof(sbuf))); + evlist__cancel_workload(evlist); goto out_delete_evlist; } diff --git a/tools/perf/tests/shell/record_lbr.sh b/tools/perf/tests/shell/record_lbr.sh index 32314641217e6..5984ca9b78f8a 100755 --- a/tools/perf/tests/shell/record_lbr.sh +++ b/tools/perf/tests/shell/record_lbr.sh @@ -4,7 +4,12 @@ set -e -if [ ! -f /sys/devices/cpu/caps/branches ] && [ ! -f /sys/devices/cpu_core/caps/branches ] +ParanoidAndNotRoot() { + [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ] +} + +if [ ! -f /sys/bus/event_source/devices/cpu/caps/branches ] && + [ ! -f /sys/bus/event_source/devices/cpu_core/caps/branches ] then echo "Skip: only x86 CPUs support LBR" exit 2 @@ -22,6 +27,7 @@ cleanup() { } trap_cleanup() { + echo "Unexpected signal in ${FUNCNAME[1]}" cleanup exit 1 } @@ -122,8 +128,11 @@ lbr_test "-j ind_call" "any indirect call" 2 lbr_test "-j ind_jmp" "any indirect jump" 100 lbr_test "-j call" "direct calls" 2 lbr_test "-j ind_call,u" "any indirect user call" 100 -lbr_test "-a -b" "system wide any branch" 2 -lbr_test "-a -j any_call" "system wide any call" 2 +if ! ParanoidAndNotRoot 1 +then + lbr_test "-a -b" "system wide any branch" 2 + lbr_test "-a -j any_call" "system wide any call" 2 +fi # Parallel parallel_lbr_test "-b" "parallel any branch" 100 & @@ -140,10 +149,16 @@ parallel_lbr_test "-j call" "parallel direct calls" 100 & pid6=$! parallel_lbr_test "-j ind_call,u" "parallel any indirect user call" 100 & pid7=$! -parallel_lbr_test "-a -b" "parallel system wide any branch" 100 & -pid8=$! -parallel_lbr_test "-a -j any_call" "parallel system wide any call" 100 & -pid9=$! +if ParanoidAndNotRoot 1 +then + pid8= + pid9= +else + parallel_lbr_test "-a -b" "parallel system wide any branch" 100 & + pid8=$! + parallel_lbr_test "-a -j any_call" "parallel system wide any call" 100 & + pid9=$! 
+fi for pid in $pid1 $pid2 $pid3 $pid4 $pid5 $pid6 $pid7 $pid8 $pid9 do diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh index 3f1e67795490a..62f13dfeae8e4 100755 --- a/tools/perf/tests/shell/stat.sh +++ b/tools/perf/tests/shell/stat.sh @@ -146,6 +146,34 @@ test_cputype() { echo "cputype test [Success]" } +test_hybrid() { + # Test the default stat command on hybrid devices opens one cycles event for + # each CPU type. + echo "hybrid test" + + # Count the number of core PMUs, assume minimum of 1 + pmus=$(ls /sys/bus/event_source/devices/*/cpus 2>/dev/null | wc -l) + if [ "$pmus" -lt 1 ] + then + pmus=1 + fi + + # Run default Perf stat + cycles_events=$(perf stat -- true 2>&1 | grep -E "/cycles/[uH]*| cycles[:uH]* " -c) + + # The expectation is that default output will have a cycles events on each + # hybrid PMU. In situations with no cycles PMU events, like virtualized, this + # can fall back to task-clock and so the end count may be 0. Fail if neither + # condition holds. + if [ "$pmus" -ne "$cycles_events" ] && [ "0" -ne "$cycles_events" ] + then + echo "hybrid test [Found $pmus PMUs but $cycles_events cycles events. Failed]" + err=1 + return + fi + echo "hybrid test [Success]" +} + test_default_stat test_stat_record_report test_stat_record_script @@ -153,4 +181,5 @@ test_stat_repeat_weak_groups test_topdown_groups test_topdown_weak_groups test_cputype +test_hybrid exit $err diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh index 8d1e6bbeac906..1447d7425f381 100755 --- a/tools/perf/tests/shell/trace_btf_enum.sh +++ b/tools/perf/tests/shell/trace_btf_enum.sh @@ -23,6 +23,14 @@ check_vmlinux() { fi } +check_permissions() { + if perf trace -e $syscall $TESTPROG 2>&1 | grep -q "Operation not permitted" + then + echo "trace+enum test [Skipped permissions]" + err=2 + fi +} + trace_landlock() { echo "Tracing syscall ${syscall}" @@ -54,6 +62,9 @@ trace_non_syscall() { } check_vmlinux +if [ $err = 0 ]; then + check_permissions +fi if [ $err = 0 ]; then trace_landlock diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h index 1443c28545a94..358c611eeddbb 100644 --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h @@ -56,15 +56,15 @@ enum arm_spe_op_type { ARM_SPE_OP_BR_INDIRECT = 1 << 17, }; -enum arm_spe_neoverse_data_source { - ARM_SPE_NV_L1D = 0x0, - ARM_SPE_NV_L2 = 0x8, - ARM_SPE_NV_PEER_CORE = 0x9, - ARM_SPE_NV_LOCAL_CLUSTER = 0xa, - ARM_SPE_NV_SYS_CACHE = 0xb, - ARM_SPE_NV_PEER_CLUSTER = 0xc, - ARM_SPE_NV_REMOTE = 0xd, - ARM_SPE_NV_DRAM = 0xe, +enum arm_spe_common_data_source { + ARM_SPE_COMMON_DS_L1D = 0x0, + ARM_SPE_COMMON_DS_L2 = 0x8, + ARM_SPE_COMMON_DS_PEER_CORE = 0x9, + ARM_SPE_COMMON_DS_LOCAL_CLUSTER = 0xa, + ARM_SPE_COMMON_DS_SYS_CACHE = 0xb, + ARM_SPE_COMMON_DS_PEER_CLUSTER = 0xc, + ARM_SPE_COMMON_DS_REMOTE = 0xd, + ARM_SPE_COMMON_DS_DRAM = 0xe, }; struct arm_spe_record { diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index 2c06f2a85400e..9890b17241c34 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -411,15 +411,15 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq, return arm_spe_deliver_synth_event(spe, speq, event, &sample); } -static const struct midr_range neoverse_spe[] = { +static const struct midr_range common_ds_encoding_cpus[] = { MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), 
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), {}, }; -static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record, - union perf_mem_data_src *data_src) +static void arm_spe__synth_data_source_common(const struct arm_spe_record *record, + union perf_mem_data_src *data_src) { /* * Even though four levels of cache hierarchy are possible, no known @@ -441,17 +441,17 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec } switch (record->source) { - case ARM_SPE_NV_L1D: + case ARM_SPE_COMMON_DS_L1D: data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1; data_src->mem_snoop = PERF_MEM_SNOOP_NONE; break; - case ARM_SPE_NV_L2: + case ARM_SPE_COMMON_DS_L2: data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2; data_src->mem_snoop = PERF_MEM_SNOOP_NONE; break; - case ARM_SPE_NV_PEER_CORE: + case ARM_SPE_COMMON_DS_PEER_CORE: data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2; data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER; @@ -460,8 +460,8 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec * We don't know if this is L1, L2 but we do know it was a cache-2-cache * transfer, so set SNOOPX_PEER */ - case ARM_SPE_NV_LOCAL_CLUSTER: - case ARM_SPE_NV_PEER_CLUSTER: + case ARM_SPE_COMMON_DS_LOCAL_CLUSTER: + case ARM_SPE_COMMON_DS_PEER_CLUSTER: data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3; data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER; @@ -469,7 +469,7 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec /* * System cache is assumed to be L3 */ - case ARM_SPE_NV_SYS_CACHE: + case ARM_SPE_COMMON_DS_SYS_CACHE: data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3; data_src->mem_snoop = PERF_MEM_SNOOP_HIT; @@ -478,13 +478,13 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec * We don't know what level it hit in, except it came from the other * socket */ - case ARM_SPE_NV_REMOTE: - data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1; - data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE; + case ARM_SPE_COMMON_DS_REMOTE: + data_src->mem_lvl = PERF_MEM_LVL_NA; + data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA; data_src->mem_remote = PERF_MEM_REMOTE_REMOTE; data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER; break; - case ARM_SPE_NV_DRAM: + case ARM_SPE_COMMON_DS_DRAM: data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM; data_src->mem_snoop = PERF_MEM_SNOOP_NONE; @@ -514,13 +514,13 @@ static void arm_spe__synth_data_source_generic(const struct arm_spe_record *reco } if (record->type & ARM_SPE_REMOTE_ACCESS) - data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1; + data_src->mem_remote = PERF_MEM_REMOTE_REMOTE; } static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr) { union perf_mem_data_src data_src = { .mem_op = PERF_MEM_OP_NA }; - bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe); + bool is_common = is_midr_in_range_list(midr, common_ds_encoding_cpus); /* Only synthesize data source for LDST operations */ if (!is_ldst_op(record->op)) @@ -533,8 +533,8 @@ static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 m else return 0; - if (is_neoverse) - arm_spe__synth_data_source_neoverse(record, &data_src); + if (is_common) + arm_spe__synth_data_source_common(record, 
&data_src); else arm_spe__synth_data_source_generic(record, &data_src); diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c index 648e8d87ef194..a228a7ba30caa 100644 --- a/tools/perf/util/disasm.c +++ b/tools/perf/util/disasm.c @@ -389,13 +389,16 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s * skip over possible up to 2 operands to get to address, e.g.: * tbnz w0, #26, ffff0000083cd190 */ - if (c++ != NULL) { + if (c != NULL) { + c++; ops->target.addr = strtoull(c, NULL, 16); if (!ops->target.addr) { c = strchr(c, ','); c = validate_comma(c, ops); - if (c++ != NULL) + if (c != NULL) { + c++; ops->target.addr = strtoull(c, NULL, 16); + } } } else { ops->target.addr = strtoull(ops->raw, NULL, 16); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 6d7249cc1a993..dda107b12b8c6 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -3237,7 +3237,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err, /* If event has exclude user then don't exclude kernel. */ if (evsel->core.attr.exclude_user) - return false; + goto no_fallback; /* Is there already the separator in the name. */ if (strchr(name, '/') || @@ -3245,7 +3245,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err, sep = ""; if (asprintf(&new_name, "%s%su", name, sep) < 0) - return false; + goto no_fallback; free(evsel->name); evsel->name = new_name; @@ -3256,8 +3256,31 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err, evsel->core.attr.exclude_hv = 1; return true; - } + } else if (err == EOPNOTSUPP && !evsel->core.attr.exclude_guest && + !evsel->exclude_GH) { + const char *name = evsel__name(evsel); + char *new_name; + const char *sep = ":"; + + /* Is there already the separator in the name. 
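 * (Illustrative: a plain "cycles" gains the ":" separator and becomes "cycles:H", while "cpu/cycles/" already carries a separator and becomes "cpu/cycles/H".)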
*/ + if (strchr(name, '/') || + (strchr(name, ':') && !evsel->is_libpfm_event)) + sep = ""; + + if (asprintf(&new_name, "%s%sH", name, sep) < 0) + goto no_fallback; + free(evsel->name); + evsel->name = new_name; + /* Apple M1 requires exclude_guest */ + scnprintf(msg, msgsize, "Trying to fall back to excluding guest samples"); + evsel->core.attr.exclude_guest = 1; + + return true; + } +no_fallback: + scnprintf(msg, msgsize, "No fallback found for '%s' for error %d", + evsel__name(evsel), err); return false; } @@ -3497,6 +3520,8 @@ bool evsel__is_hybrid(const struct evsel *evsel) struct evsel *evsel__leader(const struct evsel *evsel) { + if (evsel->core.leader == NULL) + return NULL; return container_of(evsel->core.leader, struct evsel, core); } diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c index af9a97612f9df..f61574d1581e3 100644 --- a/tools/perf/util/lzma.c +++ b/tools/perf/util/lzma.c @@ -113,7 +113,7 @@ bool lzma_is_compressed(const char *input) ssize_t rc; if (fd < 0) - return -1; + return false; rc = read(fd, buf, sizeof(buf)); close(fd); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index dbaf07bf6c5fb..89e5354fa094c 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1371,7 +1371,7 @@ static s64 perf_session__process_user_event(struct perf_session *session, const struct perf_tool *tool = session->tool; struct perf_sample sample = { .time = 0, }; int fd = perf_data__fd(session->data); - int err; + s64 err; if (event->header.type != PERF_RECORD_COMPRESSED || perf_tool__compressed_is_stub(tool)) dump_event(session->evlist, event, file_offset, &sample, file_path); diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index 649550e9b7aa8..abb567de3e8a9 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -1,6 +1,7 @@ from os import getenv, path from subprocess import Popen, PIPE from re import sub +import shlex cc = getenv("CC") @@ -16,7 +17,9 @@ src_feature_tests = getenv('srctree') + '/tools/build/feature' def clang_has_option(option): - cc_output = Popen([cc, cc_options + option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines() + cmd = shlex.split(f"{cc} {cc_options} {option}") + cmd.append(path.join(src_feature_tests, "test-hello.c")) + cc_output = Popen(cmd, stderr=PIPE).stderr.readlines() return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o) or (b"unknown warning option" in o))] == [ ] if cc_is_clang: diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c index 78d2297c1b674..1f7c065230599 100644 --- a/tools/perf/util/zlib.c +++ b/tools/perf/util/zlib.c @@ -88,7 +88,7 @@ bool gzip_is_compressed(const char *input) ssize_t rc; if (fd < 0) - return -1; + return false; rc = read(fd, buf, sizeof(buf)); close(fd); diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c index 5d1f880d1149e..e952f525599bd 100644 --- a/tools/sched_ext/scx_qmap.bpf.c +++ b/tools/sched_ext/scx_qmap.bpf.c @@ -56,7 +56,8 @@ struct qmap { queue1 SEC(".maps"), queue2 SEC(".maps"), queue3 SEC(".maps"), - queue4 SEC(".maps"); + queue4 SEC(".maps"), + dump_store SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); @@ -578,11 +579,26 @@ void BPF_STRUCT_OPS(qmap_dump, struct scx_dump_ctx *dctx) return; scx_bpf_dump("QMAP FIFO[%d]:", i); + + /* + * Dump can be invoked anytime and there is no way to iterate in + * a non-destructive way. Pop and store in dump_store and then + * restore afterwards. 
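	 * (A hypothetical walk-through: a FIFO holding [p1, p2, p3] is drained
	 * into dump_store as p1, p2, p3 and then pushed back in the same
	 * order, so the queue round-trips intact.)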
If racing against new enqueues, ordering + * can get mixed up. + */ bpf_repeat(4096) { if (bpf_map_pop_elem(fifo, &pid)) break; + bpf_map_push_elem(&dump_store, &pid, 0); scx_bpf_dump(" %d", pid); } + + bpf_repeat(4096) { + if (bpf_map_pop_elem(&dump_store, &pid)) + break; + bpf_map_push_elem(fifo, &pid, 0); + } + scx_bpf_dump("\n"); } } diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c index 892e990c034ab..22ee6309c53c1 100644 --- a/tools/testing/nvdimm/test/ndtest.c +++ b/tools/testing/nvdimm/test/ndtest.c @@ -850,11 +850,22 @@ static int ndtest_probe(struct platform_device *pdev) p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, sizeof(dma_addr_t), GFP_KERNEL); + if (!p->dcr_dma) { + rc = -ENOMEM; + goto err; + } p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, sizeof(dma_addr_t), GFP_KERNEL); + if (!p->label_dma) { + rc = -ENOMEM; + goto err; + } p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, sizeof(dma_addr_t), GFP_KERNEL); - + if (!p->dimm_dma) { + rc = -ENOMEM; + goto err; + } rc = ndtest_nvdimm_init(p); if (rc) goto err; diff --git a/tools/testing/selftests/arm64/pauth/exec_target.c b/tools/testing/selftests/arm64/pauth/exec_target.c index 4435600ca400d..e597861b26d6b 100644 --- a/tools/testing/selftests/arm64/pauth/exec_target.c +++ b/tools/testing/selftests/arm64/pauth/exec_target.c @@ -13,7 +13,12 @@ int main(void) unsigned long hwcaps; size_t val; - fread(&val, sizeof(size_t), 1, stdin); + size_t size = fread(&val, sizeof(size_t), 1, stdin); + + if (size != 1) { + fprintf(stderr, "Could not read input from stdin\n"); + return EXIT_FAILURE; + } /* don't try to execute illegal (unimplemented) instructions) caller * should have checked this and keep worker simple diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c index bb143de68875c..e27d66b75fb1f 100644 --- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c +++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c @@ -144,11 +144,17 @@ static void test_parse_test_list_file(void) if (!ASSERT_OK(ferror(fp), "prepare tmp")) goto out_fclose; + if (!ASSERT_OK(fsync(fileno(fp)), "fsync tmp")) + goto out_fclose; + init_test_filter_set(&set); - ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file"); + if (!ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file")) + goto out_fclose; + + if (!ASSERT_EQ(set.cnt, 4, "test count")) + goto out_free_set; - ASSERT_EQ(set.cnt, 4, "test count"); ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name"); ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count"); ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name"); @@ -158,8 +164,8 @@ static void test_parse_test_list_file(void) ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name"); ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name"); +out_free_set: free_test_filter_set(&set); - out_fclose: fclose(fp); out_remove: diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c index 540181c115a85..ef00d38b0a8d2 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c @@ -23,7 +23,6 @@ struct { struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); - __uint(max_entries, 2); __type(key, int); __type(value, __u32); } perf_event_map SEC(".maps"); diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c 
b/tools/testing/selftests/bpf/test_tcpnotify_user.c index 595194453ff8f..35b4893ccdf8a 100644 --- a/tools/testing/selftests/bpf/test_tcpnotify_user.c +++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c @@ -15,20 +15,18 @@ #include #include #include -#include #include -#include -#include "bpf_util.h" #include "cgroup_helpers.h" #include "test_tcpnotify.h" -#include "trace_helpers.h" #include "testing_helpers.h" #define SOCKET_BUFFER_SIZE (getpagesize() < 8192L ? getpagesize() : 8192L) pthread_t tid; +static bool exit_thread; + int rx_callbacks; static void dummyfn(void *ctx, int cpu, void *data, __u32 size) @@ -45,7 +43,7 @@ void tcp_notifier_poller(struct perf_buffer *pb) { int err; - while (1) { + while (!exit_thread) { err = perf_buffer__poll(pb, 100); if (err < 0 && err != -EINTR) { printf("failed perf_buffer__poll: %d\n", err); @@ -78,15 +76,10 @@ int main(int argc, char **argv) int error = EXIT_FAILURE; struct bpf_object *obj; char test_script[80]; - cpu_set_t cpuset; __u32 key = 0; libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - CPU_ZERO(&cpuset); - CPU_SET(0, &cpuset); - pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset); - cg_fd = cgroup_setup_and_join(cg_path); if (cg_fd < 0) goto err; @@ -151,6 +144,13 @@ int main(int argc, char **argv) sleep(10); + exit_thread = true; + int ret = pthread_join(tid, NULL); + if (ret) { + printf("FAILED: pthread_join\n"); + goto err; + } + if (verify_result(&g)) { printf("FAILED: Wrong stats Expected %d calls, got %d\n", g.ncalls, rx_callbacks); diff --git a/tools/testing/selftests/mm/madv_populate.c b/tools/testing/selftests/mm/madv_populate.c index ef7d911da13e0..16f5754b8b1f7 100644 --- a/tools/testing/selftests/mm/madv_populate.c +++ b/tools/testing/selftests/mm/madv_populate.c @@ -264,23 +264,6 @@ static void test_softdirty(void) munmap(addr, SIZE); } -static int system_has_softdirty(void) -{ - /* - * There is no way to check if the kernel supports soft-dirty, other - * than by writing to a page and seeing if the bit was set. But the - * tests are intended to check that the bit gets set when it should, so - * doing that check would turn a potentially legitimate fail into a - * skip. Fortunately, we know for sure that arm64 does not support - * soft-dirty. So for now, let's just use the arch as a corse guide. 
- */ -#if defined(__aarch64__) - return 0; -#else - return 1; -#endif -} - int main(int argc, char **argv) { int nr_tests = 16; @@ -288,7 +271,7 @@ int main(int argc, char **argv) pagesize = getpagesize(); - if (system_has_softdirty()) + if (softdirty_supported()) nr_tests += 5; ksft_print_header(); @@ -300,7 +283,7 @@ int main(int argc, char **argv) test_holes(); test_populate_read(); test_populate_write(); - if (system_has_softdirty()) + if (softdirty_supported()) test_softdirty(); err = ksft_get_fail_cnt(); diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c index bdfa5d085f009..7b91df12ce5b9 100644 --- a/tools/testing/selftests/mm/soft-dirty.c +++ b/tools/testing/selftests/mm/soft-dirty.c @@ -193,8 +193,11 @@ int main(int argc, char **argv) int pagesize; ksft_print_header(); - ksft_set_plan(15); + if (!softdirty_supported()) + ksft_exit_skip("soft-dirty is not supported\n"); + + ksft_set_plan(15); pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY); if (pagemap_fd < 0) ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH); diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c index d8d0cf04bb57f..a4a2805d3d3e7 100644 --- a/tools/testing/selftests/mm/vm_util.c +++ b/tools/testing/selftests/mm/vm_util.c @@ -193,6 +193,42 @@ unsigned long rss_anon(void) return rss_anon; } +char *__get_smap_entry(void *addr, const char *pattern, char *buf, size_t len) +{ + int ret; + FILE *fp; + char *entry = NULL; + char addr_pattern[MAX_LINE_LENGTH]; + + ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-", + (unsigned long)addr); + if (ret >= MAX_LINE_LENGTH) + ksft_exit_fail_msg("%s: Pattern is too long\n", __func__); + + fp = fopen(SMAP_FILE_PATH, "r"); + if (!fp) + ksft_exit_fail_msg("%s: Failed to open file %s\n", __func__, + SMAP_FILE_PATH); + + if (!check_for_pattern(fp, addr_pattern, buf, len)) + goto err_out; + + /* Fetch the pattern in the same block */ + if (!check_for_pattern(fp, pattern, buf, len)) + goto err_out; + + /* Trim trailing newline */ + entry = strchr(buf, '\n'); + if (entry) + *entry = '\0'; + + entry = buf + strlen(pattern); + +err_out: + fclose(fp); + return entry; +} + bool __check_huge(void *addr, char *pattern, int nr_hpages, uint64_t hpage_size) { @@ -384,3 +420,44 @@ unsigned long get_free_hugepages(void) fclose(f); return fhp; } + +static bool check_vmflag(void *addr, const char *flag) +{ + char buffer[MAX_LINE_LENGTH]; + const char *flags; + size_t flaglen; + + flags = __get_smap_entry(addr, "VmFlags:", buffer, sizeof(buffer)); + if (!flags) + ksft_exit_fail_msg("%s: No VmFlags for %p\n", __func__, addr); + + while (true) { + flags += strspn(flags, " "); + + flaglen = strcspn(flags, " "); + if (!flaglen) + return false; + + if (flaglen == strlen(flag) && !memcmp(flags, flag, flaglen)) + return true; + + flags += flaglen; + } +} + +bool softdirty_supported(void) +{ + char *addr; + bool supported = false; + const size_t pagesize = getpagesize(); + + /* New mappings are expected to be marked with VM_SOFTDIRTY (sd).
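	 * An illustrative /proc/<pid>/smaps entry (the exact flag set varies
	 * by kernel and mapping):
	 *
	 *   VmFlags: rd wr mr mw me ac sd
	 *
	 * check_vmflag() walks the space-separated tokens and matches the
	 * standalone "sd" flag.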
*/ + addr = mmap(0, pagesize, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); + if (!addr) + ksft_exit_fail_msg("mmap failed\n"); + + supported = check_vmflag(addr, "sd"); + munmap(addr, pagesize); + return supported; +} diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h index 2eaed82099255..823d07d84ad01 100644 --- a/tools/testing/selftests/mm/vm_util.h +++ b/tools/testing/selftests/mm/vm_util.h @@ -53,6 +53,7 @@ int uffd_unregister(int uffd, void *addr, uint64_t len); int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len, bool miss, bool wp, bool minor, uint64_t *ioctls); unsigned long get_free_hugepages(void); +bool softdirty_supported(void); /* * On ppc64 this will only work with radix 2M hugepage size diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 77c83d9508d3b..6845da9902818 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -465,8 +465,8 @@ ipv6_fdb_grp_fcnal() log_test $? 0 "Get Fdb nexthop group by id" # fdb nexthop group can only contain fdb nexthops - run_cmd "$IP nexthop add id 63 via 2001:db8:91::4" - run_cmd "$IP nexthop add id 64 via 2001:db8:91::5" + run_cmd "$IP nexthop add id 63 via 2001:db8:91::4 dev veth1" + run_cmd "$IP nexthop add id 64 via 2001:db8:91::5 dev veth1" run_cmd "$IP nexthop add id 103 group 63/64 fdb" log_test $? 2 "Fdb Nexthop group with non-fdb nexthops" @@ -545,15 +545,15 @@ ipv4_fdb_grp_fcnal() log_test $? 0 "Get Fdb nexthop group by id" # fdb nexthop group can only contain fdb nexthops - run_cmd "$IP nexthop add id 14 via 172.16.1.2" - run_cmd "$IP nexthop add id 15 via 172.16.1.3" + run_cmd "$IP nexthop add id 14 via 172.16.1.2 dev veth1" + run_cmd "$IP nexthop add id 15 via 172.16.1.3 dev veth1" run_cmd "$IP nexthop add id 103 group 14/15 fdb" log_test $? 2 "Fdb Nexthop group with non-fdb nexthops" # Non fdb nexthop group can not contain fdb nexthops run_cmd "$IP nexthop add id 16 via 172.16.1.2 fdb" run_cmd "$IP nexthop add id 17 via 172.16.1.3 fdb" - run_cmd "$IP nexthop add id 104 group 14/15" + run_cmd "$IP nexthop add id 104 group 16/17" log_test $? 2 "Non-Fdb Nexthop group with fdb nexthops" # fdb nexthop cannot have blackhole @@ -580,7 +580,7 @@ ipv4_fdb_grp_fcnal() run_cmd "$BRIDGE fdb add 02:02:00:00:00:14 dev vx10 nhid 12 self" log_test $? 255 "Fdb mac add with nexthop" - run_cmd "$IP ro add 172.16.0.0/22 nhid 15" + run_cmd "$IP ro add 172.16.0.0/22 nhid 16" log_test $? 
2 "Route add with fdb nexthop" run_cmd "$IP ro add 172.16.0.0/22 nhid 103" diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index c07e2bd3a315a..ec7547bdda89f 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -3160,6 +3160,17 @@ deny_join_id0_tests() run_tests $ns1 $ns2 10.0.1.1 chk_join_nr 1 1 1 fi + + # default limits, server deny join id 0 + signal + if reset_with_allow_join_id0 "default limits, server deny join id 0" 0 1; then + pm_nl_set_limits $ns1 0 2 + pm_nl_set_limits $ns2 0 2 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal + pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow + pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 2 2 2 + fi } fullmesh_tests() @@ -3711,7 +3722,7 @@ endpoint_tests() # subflow_rebuild_header is needed to support the implicit flag # userspace pm type prevents add_addr if reset "implicit EP" && - mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then pm_nl_set_limits $ns1 2 2 pm_nl_set_limits $ns2 2 2 pm_nl_add_endpoint $ns1 10.0.2.1 flags signal @@ -3736,7 +3747,7 @@ endpoint_tests() fi if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT && - mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then start_events pm_nl_set_limits $ns1 0 3 pm_nl_set_limits $ns2 0 3 @@ -3812,7 +3823,8 @@ endpoint_tests() # remove and re-add if reset_with_events "delete re-add signal" && - mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then + ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=0 pm_nl_set_limits $ns1 0 3 pm_nl_set_limits $ns2 3 3 pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal @@ -3886,7 +3898,7 @@ endpoint_tests() # flush and re-add if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT && - mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then pm_nl_set_limits $ns1 0 2 pm_nl_set_limits $ns2 1 2 # broadcast IP: no packet for this address will be received on ns1 diff --git a/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh b/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh index 1014551dd7694..6731fe1eaf2e9 100755 --- a/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh +++ b/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh @@ -17,9 +17,31 @@ cleanup() checktool "socat -h" "run test without socat" checktool "iptables --version" "run test without iptables" +checktool "conntrack --version" "run test without conntrack" trap cleanup EXIT +connect_done() +{ + local ns="$1" + local port="$2" + + ip netns exec "$ns" ss -nt -o state established "dport = :$port" | grep -q "$port" +} + +check_ctstate() +{ + local ns="$1" + local dp="$2" + + if ! ip netns exec "$ns" conntrack --get -s 192.168.1.2 -d 192.168.1.1 -p tcp \ + --sport 10000 --dport "$dp" --state ESTABLISHED > /dev/null 2>&1;then + echo "FAIL: Did not find expected state for dport $2" + ip netns exec "$ns" bash -c 'conntrack -L; conntrack -S; ss -nt' + ret=1 + fi +} + setup_ns ns1 ns2 # Connect the namespaces using a veth pair @@ -44,15 +66,18 @@ socatpid=$! 
ip netns exec "$ns2" sysctl -q net.ipv4.ip_local_port_range="10000 10000" # add a virtual IP using DNAT -ip netns exec "$ns2" iptables -t nat -A OUTPUT -d 10.96.0.1/32 -p tcp --dport 443 -j DNAT --to-destination 192.168.1.1:5201 +ip netns exec "$ns2" iptables -t nat -A OUTPUT -d 10.96.0.1/32 -p tcp --dport 443 -j DNAT --to-destination 192.168.1.1:5201 || exit 1 # ... and route it to the other namespace ip netns exec "$ns2" ip route add 10.96.0.1 via 192.168.1.1 -# add a persistent connection from the other namespace -ip netns exec "$ns2" socat -t 10 - TCP:192.168.1.1:5201 > /dev/null & +# listener should be up by now, wait if it isn't yet. +wait_local_port_listen "$ns1" 5201 tcp -sleep 1 +# add a persistent connection from the other namespace +sleep 10 | ip netns exec "$ns2" socat -t 10 - TCP:192.168.1.1:5201 > /dev/null & +cpid0=$! +busywait "$BUSYWAIT_TIMEOUT" connect_done "$ns2" "5201" # ip daddr:dport will be rewritten to 192.168.1.1 5201 # NAT must reallocate source port 10000 because @@ -71,26 +96,25 @@ fi ip netns exec "$ns1" iptables -t nat -A PREROUTING -p tcp --dport 5202 -j REDIRECT --to-ports 5201 ip netns exec "$ns1" iptables -t nat -A PREROUTING -p tcp --dport 5203 -j REDIRECT --to-ports 5201 -sleep 5 | ip netns exec "$ns2" socat -t 5 -u STDIN TCP:192.168.1.1:5202,connect-timeout=5 >/dev/null & +sleep 5 | ip netns exec "$ns2" socat -T 5 -u STDIN TCP:192.168.1.1:5202,connect-timeout=5 >/dev/null & +cpid1=$! -# if connect succeeds, client closes instantly due to EOF on stdin. -# if connect hangs, it will time out after 5s. -echo | ip netns exec "$ns2" socat -t 3 -u STDIN TCP:192.168.1.1:5203,connect-timeout=5 >/dev/null & +sleep 5 | ip netns exec "$ns2" socat -T 5 -u STDIN TCP:192.168.1.1:5203,connect-timeout=5 >/dev/null & cpid2=$! -time_then=$(date +%s) -wait $cpid2 -rv=$? -time_now=$(date +%s) +busywait "$BUSYWAIT_TIMEOUT" connect_done "$ns2" 5202 +busywait "$BUSYWAIT_TIMEOUT" connect_done "$ns2" 5203 -# Check how much time has elapsed, expectation is for -# 'cpid2' to connect and then exit (and no connect delay). 
-delta=$((time_now - time_then)) +check_ctstate "$ns1" 5202 +check_ctstate "$ns1" 5203 -if [ $delta -lt 2 ] && [ $rv -eq 0 ]; then +kill $socatpid $cpid0 $cpid1 $cpid2 +socatpid=0 + +if [ $ret -eq 0 ]; then echo "PASS: could connect to service via redirected ports" else - echo "FAIL: socat cannot connect to service via redirect ($delta seconds elapsed, returned $rv)" + echo "FAIL: socat cannot connect to service via redirect" ret=1 fi diff --git a/tools/testing/selftests/net/sctp_hello.c b/tools/testing/selftests/net/sctp_hello.c index f02f1f95d2275..a04dac0b8027d 100644 --- a/tools/testing/selftests/net/sctp_hello.c +++ b/tools/testing/selftests/net/sctp_hello.c @@ -29,7 +29,6 @@ static void set_addr(struct sockaddr_storage *ss, char *ip, char *port, int *len static int do_client(int argc, char *argv[]) { struct sockaddr_storage ss; - char buf[] = "hello"; int csk, ret, len; if (argc < 5) { @@ -56,16 +55,10 @@ static int do_client(int argc, char *argv[]) set_addr(&ss, argv[3], argv[4], &len); ret = connect(csk, (struct sockaddr *)&ss, len); - if (ret < 0) { - printf("failed to connect to peer\n"); + if (ret < 0) return -1; - } - ret = send(csk, buf, strlen(buf) + 1, 0); - if (ret < 0) { - printf("failed to send msg %d\n", ret); - return -1; - } + recv(csk, NULL, 0, 0); close(csk); return 0; @@ -75,7 +68,6 @@ int main(int argc, char *argv[]) { struct sockaddr_storage ss; int lsk, csk, ret, len; - char buf[20]; if (argc < 2 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) { printf("%s server|client ...\n", argv[0]); @@ -125,11 +117,6 @@ int main(int argc, char *argv[]) return -1; } - ret = recv(csk, buf, sizeof(buf), 0); - if (ret <= 0) { - printf("failed to recv msg %d\n", ret); - return -1; - } close(csk); close(lsk); diff --git a/tools/testing/selftests/net/sctp_vrf.sh b/tools/testing/selftests/net/sctp_vrf.sh index c854034b6aa16..667b211aa8a11 100755 --- a/tools/testing/selftests/net/sctp_vrf.sh +++ b/tools/testing/selftests/net/sctp_vrf.sh @@ -20,9 +20,9 @@ setup() { modprobe sctp_diag setup_ns CLIENT_NS1 CLIENT_NS2 SERVER_NS - ip net exec $CLIENT_NS1 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null - ip net exec $CLIENT_NS2 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null - ip net exec $SERVER_NS sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null + ip net exec $CLIENT_NS1 sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip net exec $CLIENT_NS2 sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip net exec $SERVER_NS sysctl -wq net.ipv6.conf.default.accept_dad=0 ip -n $SERVER_NS link add veth1 type veth peer name veth1 netns $CLIENT_NS1 ip -n $SERVER_NS link add veth2 type veth peer name veth1 netns $CLIENT_NS2 @@ -62,17 +62,40 @@ setup() { } cleanup() { - ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null + wait_client $CLIENT_NS1 + wait_client $CLIENT_NS2 + stop_server cleanup_ns $CLIENT_NS1 $CLIENT_NS2 $SERVER_NS } -wait_server() { +start_server() { local IFACE=$1 local CNT=0 - until ip netns exec $SERVER_NS ss -lS src $SERVER_IP:$SERVER_PORT | \ - grep LISTEN | grep "$IFACE" 2>&1 >/dev/null; do - [ $((CNT++)) = "20" ] && { RET=3; return $RET; } + ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP $SERVER_PORT $IFACE & + disown + until ip netns exec $SERVER_NS ss -SlH | grep -q "$IFACE"; do + [ $((CNT++)) -eq 30 ] && { RET=3; return $RET; } + sleep 0.1 + done +} + +stop_server() { + local CNT=0 + + ip netns exec $SERVER_NS pkill sctp_hello + while ip netns exec $SERVER_NS ss -SaH | grep -q .; do + [ $((CNT++)) -eq 30 ] && 
break + sleep 0.1 + done +} + +wait_client() { + local CLIENT_NS=$1 + local CNT=0 + + while ip netns exec $CLIENT_NS ss -SaH | grep -q .; do + [ $((CNT++)) -eq 30 ] && break sleep 0.1 done } @@ -81,14 +104,12 @@ do_test() { local CLIENT_NS=$1 local IFACE=$2 - ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null - ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \ - $SERVER_PORT $IFACE 2>&1 >/dev/null & - disown - wait_server $IFACE || return $RET + start_server $IFACE || return $RET timeout 3 ip netns exec $CLIENT_NS ./sctp_hello client $AF \ - $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null + $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT RET=$? + wait_client $CLIENT_NS + stop_server return $RET } @@ -96,25 +117,21 @@ do_testx() { local IFACE1=$1 local IFACE2=$2 - ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null - ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \ - $SERVER_PORT $IFACE1 2>&1 >/dev/null & - disown - wait_server $IFACE1 || return $RET - ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \ - $SERVER_PORT $IFACE2 2>&1 >/dev/null & - disown - wait_server $IFACE2 || return $RET + start_server $IFACE1 || return $RET + start_server $IFACE2 || return $RET timeout 3 ip netns exec $CLIENT_NS1 ./sctp_hello client $AF \ - $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null && \ + $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT && \ timeout 3 ip netns exec $CLIENT_NS2 ./sctp_hello client $AF \ - $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null + $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT RET=$? + wait_client $CLIENT_NS1 + wait_client $CLIENT_NS2 + stop_server return $RET } testup() { - ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=1 2>&1 >/dev/null + ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=1 echo -n "TEST 01: nobind, connect from client 1, l3mdev_accept=1, Y " do_test $CLIENT_NS1 || { echo "[FAIL]"; return $RET; } echo "[PASS]" @@ -123,7 +140,7 @@ testup() { do_test $CLIENT_NS2 && { echo "[FAIL]"; return $RET; } echo "[PASS]" - ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=0 2>&1 >/dev/null + ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=0 echo -n "TEST 03: nobind, connect from client 1, l3mdev_accept=0, N " do_test $CLIENT_NS1 && { echo "[FAIL]"; return $RET; } echo "[PASS]" @@ -160,7 +177,7 @@ testup() { do_testx vrf-1 vrf-2 || { echo "[FAIL]"; return $RET; } echo "[PASS]" - echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, N " + echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, Y " do_testx vrf-2 vrf-1 || { echo "[FAIL]"; return $RET; } echo "[PASS]" } diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c index 6fba7025c5e3c..d73bff23c61e7 100644 --- a/tools/testing/selftests/nolibc/nolibc-test.c +++ b/tools/testing/selftests/nolibc/nolibc-test.c @@ -196,8 +196,8 @@ int expect_zr(int expr, int llen) } -#define EXPECT_NZ(cond, expr, val) \ - do { if (!(cond)) result(llen, SKIPPED); else ret += expect_nz(expr, llen; } while (0) +#define EXPECT_NZ(cond, expr) \ + do { if (!(cond)) result(llen, SKIPPED); else ret += expect_nz(expr, llen); } while (0) static __attribute__((unused)) int expect_nz(int expr, int llen) diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index f6156790c3b4d..05dc77fd527b3 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -40,9 +40,9 @@ * 
Define weak versions to play nice with binaries that are statically linked * against a libc that doesn't support registering its own rseq. */ -__weak ptrdiff_t __rseq_offset; -__weak unsigned int __rseq_size; -__weak unsigned int __rseq_flags; +extern __weak ptrdiff_t __rseq_offset; +extern __weak unsigned int __rseq_size; +extern __weak unsigned int __rseq_flags; static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset; static const unsigned int *libc_rseq_size_p = &__rseq_size; @@ -198,7 +198,7 @@ void rseq_init(void) * libc not having registered a restartable sequence. Try to find the * symbols if that's the case. */ - if (!*libc_rseq_size_p) { + if (!libc_rseq_size_p || !*libc_rseq_size_p) { libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); diff --git a/tools/testing/selftests/vDSO/vdso_call.h b/tools/testing/selftests/vDSO/vdso_call.h index bb237d771051b..e7205584cbdca 100644 --- a/tools/testing/selftests/vDSO/vdso_call.h +++ b/tools/testing/selftests/vDSO/vdso_call.h @@ -44,7 +44,6 @@ register long _r6 asm ("r6"); \ register long _r7 asm ("r7"); \ register long _r8 asm ("r8"); \ - register long _rval asm ("r3"); \ \ LOADARGS_##nr(fn, args); \ \ @@ -54,13 +53,13 @@ " bns+ 1f\n" \ " neg 3, 3\n" \ "1:" \ - : "+r" (_r0), "=r" (_r3), "+r" (_r4), "+r" (_r5), \ + : "+r" (_r0), "+r" (_r3), "+r" (_r4), "+r" (_r5), \ "+r" (_r6), "+r" (_r7), "+r" (_r8) \ - : "r" (_rval) \ + : \ : "r9", "r10", "r11", "r12", "cr0", "cr1", "cr5", \ "cr6", "cr7", "xer", "lr", "ctr", "memory" \ ); \ - _rval; \ + _r3; \ }) #else diff --git a/tools/testing/selftests/vDSO/vdso_test_abi.c b/tools/testing/selftests/vDSO/vdso_test_abi.c index a54424e2336f4..67cbfc56e4e1b 100644 --- a/tools/testing/selftests/vDSO/vdso_test_abi.c +++ b/tools/testing/selftests/vDSO/vdso_test_abi.c @@ -182,12 +182,11 @@ int main(int argc, char **argv) unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR); ksft_print_header(); - ksft_set_plan(VDSO_TEST_PLAN); - if (!sysinfo_ehdr) { - ksft_print_msg("AT_SYSINFO_EHDR is not present!\n"); - return KSFT_SKIP; - } + if (!sysinfo_ehdr) + ksft_exit_skip("AT_SYSINFO_EHDR is not present!\n"); + + ksft_set_plan(VDSO_TEST_PLAN); version = versions[VDSO_VERSION]; name = (const char **)&names[VDSO_NAMES]; diff --git a/tools/testing/selftests/watchdog/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c index a1f506ba55786..4f09c5db0c7f3 100644 --- a/tools/testing/selftests/watchdog/watchdog-test.c +++ b/tools/testing/selftests/watchdog/watchdog-test.c @@ -332,6 +332,12 @@ int main(int argc, char *argv[]) if (oneshot) goto end; + /* Check if WDIOF_KEEPALIVEPING is supported */ + if (!(info.options & WDIOF_KEEPALIVEPING)) { + printf("WDIOC_KEEPALIVE not supported by this device\n"); + goto end; + } + printf("Watchdog Ticking Away!\n"); /*
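 * (Illustrative sketch, not the exact code of this test: on devices that
 * do advertise WDIOF_KEEPALIVEPING, the ticking loop keeps the watchdog
 * alive roughly like
 *
 *   ioctl(fd, WDIOC_KEEPALIVE, NULL);
 *   sleep(ping_rate);
 *
 * repeated until interrupted.)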