author	Mike Pagano <mpagano@gentoo.org>	2019-03-13 18:09:03 -0400
committer	Mike Pagano <mpagano@gentoo.org>	2019-03-13 18:09:03 -0400
commit	6d334d71d619bf689f1adf37e57d1717ebb1ee66 (patch)
tree	c654ae2ab363728512f938ffc104bb2ca10b3350
parent	proj/linux-patches: Linux patch 4.20.15 (diff)
proj/linux-patches: Linux patch 4.20.16
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--	0000_README              |    4
-rw-r--r--	1015_linux-4.20.16.patch | 7205
2 files changed, 7209 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index dd61e24b..516aaa46 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch: 1014_linux-4.20.15.patch
From: http://www.kernel.org
Desc: Linux 4.20.15
+Patch: 1015_linux-4.20.16.patch
+From: http://www.kernel.org
+Desc: Linux 4.20.16
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1015_linux-4.20.16.patch b/1015_linux-4.20.16.patch
new file mode 100644
index 00000000..68b917ad
--- /dev/null
+++ b/1015_linux-4.20.16.patch
@@ -0,0 +1,7205 @@
+diff --git a/Makefile b/Makefile
+index 25b45c24bac0..2979ad27e16a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 20
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
+index 1d925ed2b102..8fbbad11a80c 100644
+--- a/arch/arm/boot/dts/am335x-shc.dts
++++ b/arch/arm/boot/dts/am335x-shc.dts
+@@ -215,7 +215,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ bus-width = <0x4>;
+- cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
++ cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+ cd-inverted;
+ max-frequency = <26000000>;
+ vmmc-supply = <&vmmcsd_fixed>;
+diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
+index 27a1ee28c3bb..94efca78c42f 100644
+--- a/arch/arm/boot/dts/exynos3250.dtsi
++++ b/arch/arm/boot/dts/exynos3250.dtsi
+@@ -168,6 +168,9 @@
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
++ clock-names = "clkout8";
++ clocks = <&cmu CLK_FIN_PLL>;
++ #clock-cells = <1>;
+ };
+
+ mipi_phy: video-phy {
+diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+index 2caa3132f34e..fe91b6828da3 100644
+--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
++++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+@@ -49,7 +49,7 @@
+ };
+
+ emmc_pwrseq: pwrseq {
+- pinctrl-0 = <&sd1_cd>;
++ pinctrl-0 = <&emmc_rstn>;
+ pinctrl-names = "default";
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
+@@ -161,12 +161,6 @@
+ cpu0-supply = <&buck2_reg>;
+ };
+
+-/* RSTN signal for eMMC */
+-&sd1_cd {
+- samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+- samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+-};
+-
+ &pinctrl_1 {
+ gpio_power_key: power_key {
+ samsung,pins = "gpx1-3";
+@@ -184,6 +178,11 @@
+ samsung,pins = "gpx3-7";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ };
++
++ emmc_rstn: emmc-rstn {
++ samsung,pins = "gpk1-2";
++ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
++ };
+ };
+
+ &ehci {
+diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+index 2fac4baf1eb4..934cec60577a 100644
+--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+@@ -467,7 +467,7 @@
+ buck8_reg: BUCK8 {
+ regulator-name = "vdd_1.8v_ldo";
+ regulator-min-microvolt = <800000>;
+- regulator-max-microvolt = <1500000>;
++ regulator-max-microvolt = <2000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
+index 95a3c1cb877d..89ba48f4273b 100644
+--- a/arch/arm/boot/dts/imx6sx.dtsi
++++ b/arch/arm/boot/dts/imx6sx.dtsi
+@@ -462,7 +462,7 @@
+ };
+
+ gpt: gpt@2098000 {
+- compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
++ compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
+ reg = <0x02098000 0x4000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SX_CLK_GPT_BUS>,
+diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
+index 0d9faf1a51ea..a86b89086334 100644
+--- a/arch/arm/boot/dts/meson.dtsi
++++ b/arch/arm/boot/dts/meson.dtsi
+@@ -263,7 +263,7 @@
+ compatible = "amlogic,meson6-dwmac", "snps,dwmac";
+ reg = <0xc9410000 0x10000
+ 0xc1108108 0x4>;
+- interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
++ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
+index 0872f6e3abf5..d50fc2f60fa3 100644
+--- a/arch/arm/boot/dts/meson8b-ec100.dts
++++ b/arch/arm/boot/dts/meson8b-ec100.dts
+@@ -205,8 +205,7 @@
+ cap-sd-highspeed;
+ disable-wp;
+
+- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+- cd-inverted;
++ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
+
+ vmmc-supply = <&vcc_3v3>;
+ };
+diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
+index 58669abda259..0f0a46ddf3ff 100644
+--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
++++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
+@@ -221,7 +221,6 @@
+ /* Realtek RTL8211F (0x001cc916) */
+ eth_phy: ethernet-phy@0 {
+ reg = <0>;
+- eee-broken-1000t;
+ interrupt-parent = <&gpio_intc>;
+ /* GPIOH_3 */
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+@@ -273,8 +272,7 @@
+ cap-sd-highspeed;
+ disable-wp;
+
+- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+- cd-inverted;
++ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
+
+ vmmc-supply = <&tflash_vdd>;
+ vqmmc-supply = <&tf_io>;
+diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+index f5853610b20b..6ac02beb5fa7 100644
+--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
++++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+@@ -206,8 +206,7 @@
+ cap-sd-highspeed;
+ disable-wp;
+
+- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+- cd-inverted;
++ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
+
+ vmmc-supply = <&vcc_3v3>;
+ };
+diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+index ddc7a7bb33c0..f57acf8f66b9 100644
+--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
++++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+@@ -105,7 +105,7 @@
+ interrupts-extended = <
+ &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
+ &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
+- &cpcap 48 1
++ &cpcap 48 0
+ >;
+ interrupt-names =
+ "id_ground", "id_float", "se0conn", "vbusvld",
+diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
+index d5fe55392230..68e675258906 100644
+--- a/arch/arm/boot/dts/omap3-gta04.dtsi
++++ b/arch/arm/boot/dts/omap3-gta04.dtsi
+@@ -714,11 +714,7 @@
+
+ vdda-supply = <&vdac>;
+
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+ port {
+- reg = <0>;
+ venc_out: endpoint {
+ remote-endpoint = <&opa_in>;
+ ti,channels = <1>;
+diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
+index 182a53991c90..826920e6b878 100644
+--- a/arch/arm/boot/dts/omap3-n900.dts
++++ b/arch/arm/boot/dts/omap3-n900.dts
+@@ -814,7 +814,7 @@
+ /* For debugging, it is often good idea to remove this GPIO.
+ It means you can remove back cover (to reboot by removing
+ battery) and still use the MMC card. */
+- cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */
++ cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
+ };
+
+ /* most boards use vaux3, only some old versions use vmmc2 instead */
+diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
+index 0d9b85317529..e142e6c70a59 100644
+--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
++++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
+@@ -370,6 +370,19 @@
+ compatible = "ti,omap2-onenand";
+ reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
+
++ /*
++ * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
++ * bootloader set values when booted with v4.19 using both N950
++ * and N9 devices (OneNAND Manufacturer: Samsung):
++ *
++ * gpmc cs0 before gpmc_cs_program_settings:
++ * cs0 GPMC_CS_CONFIG1: 0xfd001202
++ * cs0 GPMC_CS_CONFIG2: 0x00181800
++ * cs0 GPMC_CS_CONFIG3: 0x00030300
++ * cs0 GPMC_CS_CONFIG4: 0x18001804
++ * cs0 GPMC_CS_CONFIG5: 0x03171d1d
++ * cs0 GPMC_CS_CONFIG6: 0x97080000
++ */
+ gpmc,sync-read;
+ gpmc,sync-write;
+ gpmc,burst-length = <16>;
+@@ -379,26 +392,27 @@
+ gpmc,device-width = <2>;
+ gpmc,mux-add-data = <2>;
+ gpmc,cs-on-ns = <0>;
+- gpmc,cs-rd-off-ns = <87>;
+- gpmc,cs-wr-off-ns = <87>;
++ gpmc,cs-rd-off-ns = <122>;
++ gpmc,cs-wr-off-ns = <122>;
+ gpmc,adv-on-ns = <0>;
+- gpmc,adv-rd-off-ns = <10>;
+- gpmc,adv-wr-off-ns = <10>;
+- gpmc,oe-on-ns = <15>;
+- gpmc,oe-off-ns = <87>;
++ gpmc,adv-rd-off-ns = <15>;
++ gpmc,adv-wr-off-ns = <15>;
++ gpmc,oe-on-ns = <20>;
++ gpmc,oe-off-ns = <122>;
+ gpmc,we-on-ns = <0>;
+- gpmc,we-off-ns = <87>;
+- gpmc,rd-cycle-ns = <112>;
+- gpmc,wr-cycle-ns = <112>;
+- gpmc,access-ns = <81>;
++ gpmc,we-off-ns = <122>;
++ gpmc,rd-cycle-ns = <148>;
++ gpmc,wr-cycle-ns = <148>;
++ gpmc,access-ns = <117>;
+ gpmc,page-burst-access-ns = <15>;
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,wait-monitoring-ns = <0>;
+- gpmc,clk-activation-ns = <5>;
+- gpmc,wr-data-mux-bus-ns = <30>;
+- gpmc,wr-access-ns = <81>;
+- gpmc,sync-clk-ps = <15000>;
++ gpmc,clk-activation-ns = <10>;
++ gpmc,wr-data-mux-bus-ns = <40>;
++ gpmc,wr-access-ns = <117>;
++
++ gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */
+
+ /*
+ * MTD partition table corresponding to Nokia's MeeGo 1.2
+diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+index 5d23667dc2d2..25540b7694d5 100644
+--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
++++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+@@ -53,7 +53,7 @@
+
+ aliases {
+ serial0 = &uart0;
+- /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
++ ethernet0 = &emac;
+ ethernet1 = &sdiowifi;
+ };
+
+diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
+index ed36dcab80f1..f51919974183 100644
+--- a/arch/arm/plat-pxa/ssp.c
++++ b/arch/arm/plat-pxa/ssp.c
+@@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
+ if (ssp == NULL)
+ return -ENODEV;
+
+- iounmap(ssp->mmio_base);
+-
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+@@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
+ list_del(&ssp->node);
+ mutex_unlock(&ssp_lock);
+
+- kfree(ssp);
+ return 0;
+ }
+
+diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+index f4964bee6a1a..e80a792827ed 100644
+--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
++++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+@@ -118,6 +118,7 @@
+ reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ clocks = <&pmic>;
+ clock-names = "ext_clock";
++ post-power-on-delay-ms = <10>;
+ power-off-delay-us = <10>;
+ };
+
+@@ -300,7 +301,6 @@
+
+ dwmmc_0: dwmmc0@f723d000 {
+ cap-mmc-highspeed;
+- mmc-hs200-1_8v;
+ non-removable;
+ bus-width = <0x8>;
+ vmmc-supply = <&ldo19>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index b29fe80d7288..461612a5ab5e 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -397,7 +397,7 @@
+ };
+
+ intc: interrupt-controller@9bc0000 {
+- compatible = "arm,gic-v3";
++ compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+index 1ec6aaa520c1..09320caea54e 100644
+--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+@@ -1160,6 +1160,9 @@
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
++ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
++ <&dmac2 0x13>, <&dmac2 0x12>;
++ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 310>;
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+index 83946ca2eba5..d59dada13722 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+@@ -1028,6 +1028,9 @@
+ <&cpg CPG_CORE R8A77965_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
++ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
++ <&dmac2 0x13>, <&dmac2 0x12>;
++ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 310>;
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+index eb5e8bddb610..8954c8c6f547 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
++++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+@@ -101,6 +101,7 @@
+ sdio_pwrseq: sdio_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
++ post-power-on-delay-ms = <10>;
+ };
+ };
+
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index 2a5b338b2542..f17afb99890c 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
+ addr < (unsigned long)__entry_text_end) ||
+ (addr >= (unsigned long)__idmap_text_start &&
+ addr < (unsigned long)__idmap_text_end) ||
++ (addr >= (unsigned long)__hyp_text_start &&
++ addr < (unsigned long)__hyp_text_end) ||
+ !!search_exception_tables(addr))
+ return true;
+
+ if (!is_kernel_in_hyp_mode()) {
+- if ((addr >= (unsigned long)__hyp_text_start &&
+- addr < (unsigned long)__hyp_text_end) ||
+- (addr >= (unsigned long)__hyp_idmap_text_start &&
++ if ((addr >= (unsigned long)__hyp_idmap_text_start &&
+ addr < (unsigned long)__hyp_idmap_text_end))
+ return true;
+ }
+diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
+index 50cff3cbcc6d..4f7b1fa31cf5 100644
+--- a/arch/mips/boot/dts/ingenic/ci20.dts
++++ b/arch/mips/boot/dts/ingenic/ci20.dts
+@@ -76,7 +76,7 @@
+ status = "okay";
+
+ pinctrl-names = "default";
+- pinctrl-0 = <&pins_uart2>;
++ pinctrl-0 = <&pins_uart3>;
+ };
+
+ &uart4 {
+@@ -196,9 +196,9 @@
+ bias-disable;
+ };
+
+- pins_uart2: uart2 {
+- function = "uart2";
+- groups = "uart2-data", "uart2-hwflow";
++ pins_uart3: uart3 {
++ function = "uart3";
++ groups = "uart3-data", "uart3-hwflow";
+ bias-disable;
+ };
+
+diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
+index 6fb16fd24035..2beb78a62b7d 100644
+--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
++++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
+@@ -161,7 +161,7 @@
+ #dma-cells = <2>;
+
+ interrupt-parent = <&intc>;
+- interrupts = <29>;
++ interrupts = <20>;
+
+ clocks = <&cgu JZ4740_CLK_DMA>;
+
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index d4f7fd4550e1..85522c137f19 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
+ static int get_frame_info(struct mips_frame_info *info)
+ {
+ bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
+- union mips_instruction insn, *ip, *ip_end;
++ union mips_instruction insn, *ip;
+ const unsigned int max_insns = 128;
+ unsigned int last_insn_size = 0;
+ unsigned int i;
+@@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
+ if (!ip)
+ goto err;
+
+- ip_end = (void *)ip + info->func_size;
+-
+- for (i = 0; i < max_insns && ip < ip_end; i++) {
++ for (i = 0; i < max_insns; i++) {
+ ip = (void *)ip + last_insn_size;
++
+ if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
+ insn.word = ip->halfword[0] << 16;
+ last_insn_size = 2;
+diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
+index 0531f49af5c3..ce70bceb8872 100644
+--- a/arch/riscv/include/asm/processor.h
++++ b/arch/riscv/include/asm/processor.h
+@@ -22,7 +22,7 @@
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+-#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
++#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+
+ #define STACK_TOP TASK_SIZE
+ #define STACK_TOP_MAX STACK_TOP
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index 2c290e6aaa6e..6d652826b5cb 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -196,7 +196,7 @@ static void __init setup_bootmem(void)
+ BUG_ON(mem_size == 0);
+
+ set_max_mapnr(PFN_DOWN(mem_size));
+- max_low_pfn = memblock_end_of_DRAM();
++ max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ setup_initrd();
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 1d9bfaff60bc..658ebf645f42 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -28,7 +28,8 @@ static void __init zone_sizes_init(void)
+ unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
+
+ #ifdef CONFIG_ZONE_DMA32
+- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
++ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
++ (unsigned long) PFN_PHYS(max_low_pfn)));
+ #endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 64037895b085..f105ae8651c9 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -600,6 +600,14 @@ ENTRY(trampoline_32bit_src)
+ leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
+ movl %eax, %cr3
+ 3:
++ /* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
++ pushl %ecx
++ movl $MSR_EFER, %ecx
++ rdmsr
++ btsl $_EFER_LME, %eax
++ wrmsr
++ popl %ecx
++
+ /* Enable PAE and LA57 (if required) paging modes */
+ movl $X86_CR4_PAE, %eax
+ cmpl $0, %edx
+diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
+index 91f75638f6e6..6ff7e81b5628 100644
+--- a/arch/x86/boot/compressed/pgtable.h
++++ b/arch/x86/boot/compressed/pgtable.h
+@@ -6,7 +6,7 @@
+ #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
+
+ #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
+-#define TRAMPOLINE_32BIT_CODE_SIZE 0x60
++#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
+
+ #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
+
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index b684f0294f35..e2b1447192a8 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
+ */
+ static void free_fake_cpuc(struct cpu_hw_events *cpuc)
+ {
+- kfree(cpuc->shared_regs);
++ intel_cpuc_finish(cpuc);
+ kfree(cpuc);
+ }
+
+@@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
+ cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
+ if (!cpuc)
+ return ERR_PTR(-ENOMEM);
+-
+- /* only needed, if we have extra_regs */
+- if (x86_pmu.extra_regs) {
+- cpuc->shared_regs = allocate_shared_regs(cpu);
+- if (!cpuc->shared_regs)
+- goto error;
+- }
+ cpuc->is_fake = 1;
++
++ if (intel_cpuc_prepare(cpuc, cpu))
++ goto error;
++
+ return cpuc;
+ error:
+ free_fake_cpuc(cpuc);
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index ede20c44cc69..3d77c736299f 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -1999,6 +1999,39 @@ static void intel_pmu_nhm_enable_all(int added)
+ intel_pmu_enable_all(added);
+ }
+
++static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
++{
++ u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
++
++ if (cpuc->tfa_shadow != val) {
++ cpuc->tfa_shadow = val;
++ wrmsrl(MSR_TSX_FORCE_ABORT, val);
++ }
++}
++
++static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
++{
++ /*
++ * We're going to use PMC3, make sure TFA is set before we touch it.
++ */
++ if (cntr == 3 && !cpuc->is_fake)
++ intel_set_tfa(cpuc, true);
++}
++
++static void intel_tfa_pmu_enable_all(int added)
++{
++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++
++ /*
++ * If we find PMC3 is no longer used when we enable the PMU, we can
++ * clear TFA.
++ */
++ if (!test_bit(3, cpuc->active_mask))
++ intel_set_tfa(cpuc, false);
++
++ intel_pmu_enable_all(added);
++}
++
++
+ static void enable_counter_freeze(void)
+ {
+ update_debugctlmsr(get_debugctlmsr() |
+@@ -2768,6 +2801,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
+ raw_spin_unlock(&excl_cntrs->lock);
+ }
+
++static struct event_constraint *
++dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
++{
++ WARN_ON_ONCE(!cpuc->constraint_list);
++
++ if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
++ struct event_constraint *cx;
++
++ /*
++ * grab pre-allocated constraint entry
++ */
++ cx = &cpuc->constraint_list[idx];
++
++ /*
++ * initialize dynamic constraint
++ * with static constraint
++ */
++ *cx = *c;
++
++ /*
++ * mark constraint as dynamic
++ */
++ cx->flags |= PERF_X86_EVENT_DYNAMIC;
++ c = cx;
++ }
++
++ return c;
++}
++
+ static struct event_constraint *
+ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+ int idx, struct event_constraint *c)
+@@ -2798,27 +2860,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+ * only needed when constraint has not yet
+ * been cloned (marked dynamic)
+ */
+- if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+- struct event_constraint *cx;
+-
+- /*
+- * grab pre-allocated constraint entry
+- */
+- cx = &cpuc->constraint_list[idx];
+-
+- /*
+- * initialize dynamic constraint
+- * with static constraint
+- */
+- *cx = *c;
+-
+- /*
+- * mark constraint as dynamic, so we
+- * can free it later on
+- */
+- cx->flags |= PERF_X86_EVENT_DYNAMIC;
+- c = cx;
+- }
++ c = dyn_constraint(cpuc, c, idx);
+
+ /*
+ * From here on, the constraint is dynamic.
+@@ -3345,6 +3387,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ return c;
+ }
+
++static bool allow_tsx_force_abort = true;
++
++static struct event_constraint *
++tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
++ struct perf_event *event)
++{
++ struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
++
++ /*
++ * Without TFA we must not use PMC3.
++ */
++ if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
++ c = dyn_constraint(cpuc, c, idx);
++ c->idxmsk64 &= ~(1ULL << 3);
++ c->weight--;
++ }
++
++ return c;
++}
++
+ /*
+ * Broadwell:
+ *
+@@ -3398,7 +3460,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
+ return x86_event_sysfs_show(page, config, event);
+ }
+
+-struct intel_shared_regs *allocate_shared_regs(int cpu)
++static struct intel_shared_regs *allocate_shared_regs(int cpu)
+ {
+ struct intel_shared_regs *regs;
+ int i;
+@@ -3430,23 +3492,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
+ return c;
+ }
+
+-static int intel_pmu_cpu_prepare(int cpu)
+-{
+- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
++int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
++{
+ if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+ cpuc->shared_regs = allocate_shared_regs(cpu);
+ if (!cpuc->shared_regs)
+ goto err;
+ }
+
+- if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
++ if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
+ size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
+
+- cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
++ cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+ if (!cpuc->constraint_list)
+ goto err_shared_regs;
++ }
+
++ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+ cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
+ if (!cpuc->excl_cntrs)
+ goto err_constraint_list;
+@@ -3468,6 +3531,11 @@ err:
+ return -ENOMEM;
+ }
+
++static int intel_pmu_cpu_prepare(int cpu)
++{
++ return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
++}
++
+ static void flip_smm_bit(void *data)
+ {
+ unsigned long set = *(unsigned long *)data;
+@@ -3542,9 +3610,8 @@ static void intel_pmu_cpu_starting(int cpu)
+ }
+ }
+
+-static void free_excl_cntrs(int cpu)
++static void free_excl_cntrs(struct cpu_hw_events *cpuc)
+ {
+- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ struct intel_excl_cntrs *c;
+
+ c = cpuc->excl_cntrs;
+@@ -3552,9 +3619,10 @@ static void free_excl_cntrs(int cpu)
+ if (c->core_id == -1 || --c->refcnt == 0)
+ kfree(c);
+ cpuc->excl_cntrs = NULL;
+- kfree(cpuc->constraint_list);
+- cpuc->constraint_list = NULL;
+ }
++
++ kfree(cpuc->constraint_list);
++ cpuc->constraint_list = NULL;
+ }
+
+ static void intel_pmu_cpu_dying(int cpu)
+@@ -3565,9 +3633,8 @@ static void intel_pmu_cpu_dying(int cpu)
+ disable_counter_freeze();
+ }
+
+-static void intel_pmu_cpu_dead(int cpu)
++void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+ {
+- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ struct intel_shared_regs *pc;
+
+ pc = cpuc->shared_regs;
+@@ -3577,7 +3644,12 @@ static void intel_pmu_cpu_dead(int cpu)
+ cpuc->shared_regs = NULL;
+ }
+
+- free_excl_cntrs(cpu);
++ free_excl_cntrs(cpuc);
++}
++
++static void intel_pmu_cpu_dead(int cpu)
++{
++ intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
+ }
+
+ static void intel_pmu_sched_task(struct perf_event_context *ctx,
+@@ -4070,8 +4142,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
+ NULL
+ };
+
++DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
++
+ static struct attribute *intel_pmu_attrs[] = {
+ &dev_attr_freeze_on_smi.attr,
++ NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
+ NULL,
+ };
+
+@@ -4564,6 +4639,15 @@ __init int intel_pmu_init(void)
+ tsx_attr = hsw_tsx_events_attrs;
+ intel_pmu_pebs_data_source_skl(
+ boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
++
++ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
++ x86_pmu.flags |= PMU_FL_TFA;
++ x86_pmu.get_event_constraints = tfa_get_event_constraints;
++ x86_pmu.enable_all = intel_tfa_pmu_enable_all;
++ x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
++ intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
++ }
++
+ pr_cont("Skylake events, ");
+ name = "skylake";
+ break;
+@@ -4715,7 +4799,7 @@ static __init int fixup_ht_bug(void)
+ hardlockup_detector_perf_restart();
+
+ for_each_online_cpu(c)
+- free_excl_cntrs(c);
++ free_excl_cntrs(&per_cpu(cpu_hw_events, c));
+
+ cpus_read_unlock();
+ pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index d46fd6754d92..a345d079f876 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -242,6 +242,11 @@ struct cpu_hw_events {
+ struct intel_excl_cntrs *excl_cntrs;
+ int excl_thread_id; /* 0 or 1 */
+
++ /*
++ * SKL TSX_FORCE_ABORT shadow
++ */
++ u64 tfa_shadow;
++
+ /*
+ * AMD specific bits
+ */
+@@ -681,6 +686,7 @@ do { \
+ #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
+ #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
+ #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
++#define PMU_FL_TFA 0x20 /* deal with TSX force abort */
+
+ #define EVENT_VAR(_id) event_attr_##_id
+ #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
+@@ -889,7 +895,8 @@ struct event_constraint *
+ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event);
+
+-struct intel_shared_regs *allocate_shared_regs(int cpu);
++extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
++extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
+
+ int intel_pmu_init(void);
+
+@@ -1025,9 +1032,13 @@ static inline int intel_pmu_init(void)
+ return 0;
+ }
+
+-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
++static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
++{
++ return 0;
++}
++
++static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
+ {
+- return NULL;
+ }
+
+ static inline int is_ht_workaround_enabled(void)
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 28c4a502b419..9246a6715cf2 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -342,6 +342,7 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
+ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index 0dd6b0f4000e..d9a9993af882 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -6,7 +6,7 @@
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+- * "Extreme" ones, like Broadwell-E.
++ * "Extreme" ones, like Broadwell-E, or Atom microserver.
+ *
+ * While adding a new CPUID for a new microarchitecture, add a new
+ * group to keep logically sorted out in chronological order. Within
+@@ -71,6 +71,7 @@
+ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
+ #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
+ #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
++#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
+
+ /* Xeon Phi */
+
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 9e39cc8bd989..ea192e402abe 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -630,6 +630,12 @@
+
+ #define MSR_IA32_TSC_DEADLINE 0x000006E0
+
++
++#define MSR_TSX_FORCE_ABORT 0x0000010F
++
++#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
++#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
++
+ /* P4/Xeon+ specific */
+ #define MSR_IA32_MCG_EAX 0x00000180
+ #define MSR_IA32_MCG_EBX 0x00000181
+diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
+index 8f657286d599..0ce558a8150d 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -7,7 +7,11 @@
+ #endif
+
+ #ifdef CONFIG_KASAN
++#ifdef CONFIG_KASAN_EXTRA
++#define KASAN_STACK_ORDER 2
++#else
+ #define KASAN_STACK_ORDER 1
++#endif
+ #else
+ #define KASAN_STACK_ORDER 0
+ #endif
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 07b5fc00b188..a4e7e100ed26 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -707,7 +707,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
+ if (!p) {
+ return ret;
+ } else {
+- if (boot_cpu_data.microcode == p->patch_id)
++ if (boot_cpu_data.microcode >= p->patch_id)
+ return ret;
+
+ ret = UCODE_NEW;
+diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
+index 278cd07228dd..9490a2845f14 100644
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
+ struct efi_info *current_ei = &boot_params.efi_info;
+ struct efi_info *ei = &params->efi_info;
+
++ if (!efi_enabled(EFI_RUNTIME_SERVICES))
++ return 0;
++
+ if (!current_ei->efi_memmap_size)
+ return 0;
+
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 30a5111ae5fd..527e69b12002 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -635,6 +635,22 @@ static void quirk_no_aersid(struct pci_dev *pdev)
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
+
++static void quirk_intel_th_dnv(struct pci_dev *dev)
++{
++ struct resource *r = &dev->resource[4];
++
++ /*
++ * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
++ * appears to be 4 MB in reality.
++ */
++ if (r->end == r->start + 0x7ff) {
++ r->start = 0;
++ r->end = 0x3fffff;
++ r->flags |= IORESOURCE_UNSET;
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
++
++
+ #ifdef CONFIG_PHYS_ADDR_T_64BIT
+
+ #define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
+diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
+index 11fed6c06a7c..b5938160fb3d 100644
+--- a/arch/xtensa/configs/smp_lx200_defconfig
++++ b/arch/xtensa/configs/smp_lx200_defconfig
+@@ -33,6 +33,7 @@ CONFIG_SMP=y
+ CONFIG_HOTPLUG_CPU=y
+ # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
+ # CONFIG_PCI is not set
++CONFIG_VECTORS_OFFSET=0x00002000
+ CONFIG_XTENSA_PLATFORM_XTFPGA=y
+ CONFIG_CMDLINE_BOOL=y
+ CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
+diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
+index 9053a5622d2c..5bd38ea2da38 100644
+--- a/arch/xtensa/kernel/head.S
++++ b/arch/xtensa/kernel/head.S
+@@ -280,12 +280,13 @@ should_never_return:
+
+ movi a2, cpu_start_ccount
+ 1:
++ memw
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ movi a3, 0
+ s32i a3, a2, 0
+- memw
+ 1:
++ memw
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ wsr a3, ccount
+@@ -321,11 +322,13 @@ ENTRY(cpu_restart)
+ rsr a0, prid
+ neg a2, a0
+ movi a3, cpu_start_id
++ memw
+ s32i a2, a3, 0
+ #if XCHAL_DCACHE_IS_WRITEBACK
+ dhwbi a3, 0
+ #endif
+ 1:
++ memw
+ l32i a2, a3, 0
+ dhi a3, 0
+ bne a2, a0, 1b
+diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
+index 932d64689bac..be1f280c322c 100644
+--- a/arch/xtensa/kernel/smp.c
++++ b/arch/xtensa/kernel/smp.c
+@@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ {
+ unsigned i;
+
+- for (i = 0; i < max_cpus; ++i)
++ for_each_possible_cpu(i)
+ set_cpu_present(i, true);
+ }
+
+@@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
+ pr_info("%s: Core Count = %d\n", __func__, ncpus);
+ pr_info("%s: Core Id = %d\n", __func__, core_id);
+
++ if (ncpus > NR_CPUS) {
++ ncpus = NR_CPUS;
++ pr_info("%s: limiting core count by %d\n", __func__, ncpus);
++ }
++
+ for (i = 0; i < ncpus; ++i)
+ set_cpu_possible(i, true);
+ }
+@@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+ int i;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+- cpu_start_id = cpu;
+- system_flush_invalidate_dcache_range(
+- (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
++ WRITE_ONCE(cpu_start_id, cpu);
++ /* Pairs with the third memw in the cpu_restart */
++ mb();
++ system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
++ sizeof(cpu_start_id));
+ #endif
+ smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
+
+@@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+ ccount = get_ccount();
+ while (!ccount);
+
+- cpu_start_ccount = ccount;
++ WRITE_ONCE(cpu_start_ccount, ccount);
+
+- while (time_before(jiffies, timeout)) {
++ do {
++ /*
++ * Pairs with the first two memws in the
++ * .Lboot_secondary.
++ */
+ mb();
+- if (!cpu_start_ccount)
+- break;
+- }
++ ccount = READ_ONCE(cpu_start_ccount);
++ } while (ccount && time_before(jiffies, timeout));
+
+- if (cpu_start_ccount) {
++ if (ccount) {
+ smp_call_function_single(0, mx_cpu_stop,
+- (void *)cpu, 1);
+- cpu_start_ccount = 0;
++ (void *)cpu, 1);
++ WRITE_ONCE(cpu_start_ccount, 0);
+ return -EIO;
+ }
+ }
+@@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+ pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
+ __func__, cpu, idle, start_info.stack);
+
++ init_completion(&cpu_running);
+ ret = boot_secondary(cpu, idle);
+ if (ret == 0) {
+ wait_for_completion_timeout(&cpu_running,
+@@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ while (time_before(jiffies, timeout)) {
+ system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+- sizeof(cpu_start_id));
+- if (cpu_start_id == -cpu) {
++ sizeof(cpu_start_id));
++ /* Pairs with the second memw in the cpu_restart */
++ mb();
++ if (READ_ONCE(cpu_start_id) == -cpu) {
+ platform_cpu_kill(cpu);
+ return;
+ }
+diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
+index fd524a54d2ab..378186b5eb40 100644
+--- a/arch/xtensa/kernel/time.c
++++ b/arch/xtensa/kernel/time.c
+@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
+ container_of(evt, struct ccount_timer, evt);
+
+ if (timer->irq_enabled) {
+- disable_irq(evt->irq);
++ disable_irq_nosync(evt->irq);
+ timer->irq_enabled = 0;
+ }
+ return 0;
+diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
+index 38c35c32aff2..c1c72b42dda0 100644
+--- a/block/blk-iolatency.c
++++ b/block/blk-iolatency.c
+@@ -72,6 +72,7 @@
+ #include <linux/sched/loadavg.h>
+ #include <linux/sched/signal.h>
+ #include <trace/events/block.h>
++#include <linux/blk-mq.h>
+ #include "blk-rq-qos.h"
+ #include "blk-stat.h"
+
+@@ -648,6 +649,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
+ return;
+
+ enabled = blk_iolatency_enabled(iolat->blkiolat);
++ if (!enabled)
++ return;
++
+ while (blkg && blkg->parent) {
+ iolat = blkg_to_lat(blkg);
+ if (!iolat) {
+@@ -657,7 +661,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
+ rqw = &iolat->rq_wait;
+
+ atomic_dec(&rqw->inflight);
+- if (!enabled || iolat->min_lat_nsec == 0)
++ if (iolat->min_lat_nsec == 0)
+ goto next;
+ iolatency_record_time(iolat, &bio->bi_issue, now,
+ issue_as_root);
+@@ -801,10 +805,13 @@ int blk_iolatency_init(struct request_queue *q)
+ return 0;
+ }
+
+-static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
++/*
++ * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
++ * return 0.
++ */
++static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+ {
+ struct iolatency_grp *iolat = blkg_to_lat(blkg);
+- struct blk_iolatency *blkiolat = iolat->blkiolat;
+ u64 oldval = iolat->min_lat_nsec;
+
+ iolat->min_lat_nsec = val;
+@@ -813,9 +820,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+ BLKIOLATENCY_MAX_WIN_SIZE);
+
+ if (!oldval && val)
+- atomic_inc(&blkiolat->enabled);
++ return 1;
+ if (oldval && !val)
+- atomic_dec(&blkiolat->enabled);
++ return -1;
++ return 0;
+ }
+
+ static void iolatency_clear_scaling(struct blkcg_gq *blkg)
+@@ -847,6 +855,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
+ u64 lat_val = 0;
+ u64 oldval;
+ int ret;
++ int enable = 0;
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
+ if (ret)
+@@ -881,7 +890,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
+ blkg = ctx.blkg;
+ oldval = iolat->min_lat_nsec;
+
+- iolatency_set_min_lat_nsec(blkg, lat_val);
++ enable = iolatency_set_min_lat_nsec(blkg, lat_val);
++ if (enable) {
++ WARN_ON_ONCE(!blk_get_queue(blkg->q));
++ blkg_get(blkg);
++ }
++
+ if (oldval != iolat->min_lat_nsec) {
+ iolatency_clear_scaling(blkg);
+ }
+@@ -889,6 +903,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
+ ret = 0;
+ out:
+ blkg_conf_finish(&ctx);
++ if (ret == 0 && enable) {
++ struct iolatency_grp *tmp = blkg_to_lat(blkg);
++ struct blk_iolatency *blkiolat = tmp->blkiolat;
++
++ blk_mq_freeze_queue(blkg->q);
++
++ if (enable == 1)
++ atomic_inc(&blkiolat->enabled);
++ else if (enable == -1)
++ atomic_dec(&blkiolat->enabled);
++ else
++ WARN_ON_ONCE(1);
++
++ blk_mq_unfreeze_queue(blkg->q);
++
++ blkg_put(blkg);
++ blk_put_queue(blkg->q);
++ }
+ return ret ?: nbytes;
+ }
+
+@@ -1024,8 +1056,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
+ {
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ struct blkcg_gq *blkg = lat_to_blkg(iolat);
++ struct blk_iolatency *blkiolat = iolat->blkiolat;
++ int ret;
+
+- iolatency_set_min_lat_nsec(blkg, 0);
++ ret = iolatency_set_min_lat_nsec(blkg, 0);
++ if (ret == 1)
++ atomic_inc(&blkiolat->enabled);
++ if (ret == -1)
++ atomic_dec(&blkiolat->enabled);
+ iolatency_clear_scaling(blkg);
+ }
+
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index f133b7f5652f..26110e74e086 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = {
+ "core_bi_pll_test_se",
+ };
+
+-static const char * const gcc_parent_names_7[] = {
+- "bi_tcxo",
++static const char * const gcc_parent_names_7_ao[] = {
++ "bi_tcxo_ao",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+@@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = {
+ "core_bi_pll_test_se",
+ };
+
++static const char * const gcc_parent_names_8_ao[] = {
++ "bi_tcxo_ao",
++ "gpll0",
++ "core_bi_pll_test_se",
++};
++
+ static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+@@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+- .parent_names = gcc_parent_names_7,
++ .parent_names = gcc_parent_names_7_ao,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+@@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+- .parent_names = gcc_parent_names_8,
++ .parent_names = gcc_parent_names_8_ao,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
+index 8d77090ad94a..0241450f3eb3 100644
+--- a/drivers/clk/ti/divider.c
++++ b/drivers/clk/ti/divider.c
+@@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
+ num_dividers = i;
+
+ tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
+- if (!tmp)
++ if (!tmp) {
++ *table = ERR_PTR(-ENOMEM);
+ return -ENOMEM;
++ }
+
+ valid_div = 0;
+ *width = 0;
+@@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
+ {
+ struct clk_omap_divider *div;
+ struct clk_omap_reg *reg;
++ int ret;
+
+ if (!setup)
+ return NULL;
+@@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
+ div->flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+ div->table = _get_div_table_from_setup(setup, &div->width);
++ if (IS_ERR(div->table)) {
++ ret = PTR_ERR(div->table);
++ kfree(div);
++ return ERR_PTR(ret);
++ }
++
+
+ div->shift = setup->bit_shift;
+ div->latch = -EINVAL;
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 4e557684f792..fe69dccfa0c0 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -203,6 +203,7 @@ struct at_xdmac_chan {
+ u32 save_cim;
+ u32 save_cnda;
+ u32 save_cndc;
++ u32 irq_status;
+ unsigned long status;
+ struct tasklet_struct tasklet;
+ struct dma_slave_config sconfig;
+@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
+ struct at_xdmac_desc *desc;
+ u32 error_mask;
+
+- dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
+- __func__, atchan->status);
++ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
++ __func__, atchan->irq_status);
+
+ error_mask = AT_XDMAC_CIS_RBEIS
+ | AT_XDMAC_CIS_WBEIS
+@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
+
+ if (at_xdmac_chan_is_cyclic(atchan)) {
+ at_xdmac_handle_cyclic(atchan);
+- } else if ((atchan->status & AT_XDMAC_CIS_LIS)
+- || (atchan->status & error_mask)) {
++ } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
++ || (atchan->irq_status & error_mask)) {
+ struct dma_async_tx_descriptor *txd;
+
+- if (atchan->status & AT_XDMAC_CIS_RBEIS)
++ if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
+ dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+- if (atchan->status & AT_XDMAC_CIS_WBEIS)
++ if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
+ dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+- if (atchan->status & AT_XDMAC_CIS_ROIS)
++ if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
+ dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+ spin_lock(&atchan->lock);
+@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
+ atchan = &atxdmac->chan[i];
+ chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
+ chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
+- atchan->status = chan_status & chan_imr;
++ atchan->irq_status = chan_status & chan_imr;
+ dev_vdbg(atxdmac->dma.dev,
+ "%s: chan%d: imr=0x%x, status=0x%x\n",
+ __func__, i, chan_imr, chan_status);
+@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
+ at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+
+- if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
++ if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+
+ tasklet_schedule(&atchan->tasklet);
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index aa1712beb0cc..7b7fba0c9253 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -642,11 +642,9 @@ static int dmatest_func(void *data)
+ srcs[i] = um->addr[i] + src_off;
+ ret = dma_mapping_error(dev->dev, um->addr[i]);
+ if (ret) {
+- dmaengine_unmap_put(um);
+ result("src mapping error", total_tests,
+ src_off, dst_off, len, ret);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ }
+ um->to_cnt++;
+ }
+@@ -661,11 +659,9 @@ static int dmatest_func(void *data)
+ DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(dev->dev, dsts[i]);
+ if (ret) {
+- dmaengine_unmap_put(um);
+ result("dst mapping error", total_tests,
+ src_off, dst_off, len, ret);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ }
+ um->bidi_cnt++;
+ }
+@@ -693,12 +689,10 @@ static int dmatest_func(void *data)
+ }
+
+ if (!tx) {
+- dmaengine_unmap_put(um);
+ result("prep error", total_tests, src_off,
+ dst_off, len, ret);
+ msleep(100);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ }
+
+ done->done = false;
+@@ -707,12 +701,10 @@ static int dmatest_func(void *data)
+ cookie = tx->tx_submit(tx);
+
+ if (dma_submit_error(cookie)) {
+- dmaengine_unmap_put(um);
+ result("submit error", total_tests, src_off,
+ dst_off, len, ret);
+ msleep(100);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ }
+ dma_async_issue_pending(chan);
+
+@@ -725,16 +717,14 @@ static int dmatest_func(void *data)
+ dmaengine_unmap_put(um);
+ result("test timed out", total_tests, src_off, dst_off,
+ len, 0);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ } else if (status != DMA_COMPLETE) {
+ dmaengine_unmap_put(um);
+ result(status == DMA_ERROR ?
+ "completion error status" :
+ "completion busy status", total_tests, src_off,
+ dst_off, len, ret);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ }
+
+ dmaengine_unmap_put(um);
+@@ -779,6 +769,12 @@ static int dmatest_func(void *data)
+ verbose_result("test passed", total_tests, src_off,
+ dst_off, len, 0);
+ }
++
++ continue;
++
++error_unmap_continue:
++ dmaengine_unmap_put(um);
++ failed_tests++;
+ }
+ ktime = ktime_sub(ktime_get(), ktime);
+ ktime = ktime_sub(ktime, comparetime);
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index 6bc8e6640d71..c51462f5aa1e 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_CHAP_TYPE:
+ rc = S_IRUGO;
++ break;
+ case ISCSI_BOOT_TGT_NAME:
+ if (tgt->tgt_name_len)
+ rc = S_IRUGO;
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index 5960396c8d9a..222170a1715f 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -250,6 +250,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+ struct vf610_gpio_port *port;
+ struct resource *iores;
+ struct gpio_chip *gc;
++ int i;
+ int ret;
+
+ port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+@@ -289,6 +290,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+ if (ret < 0)
+ return ret;
+
++ /* Mask all GPIO interrupts */
++ for (i = 0; i < gc->ngpio; i++)
++ vf610_gpio_writel(0, port->base + PORT_PCR(i));
++
+ /* Clear the interrupt status register for all GPIO's */
+ vf610_gpio_writel(~0, port->base + PORT_ISFR);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 59cc678de8c1..bbac15fd8caa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1671,7 +1671,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
+ effective_mode &= ~S_IWUSR;
+
+ if ((adev->flags & AMD_IS_APU) &&
+- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
++ (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
++ attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
+ attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index e45e929aaab5..90a5970af4b7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -38,6 +38,7 @@
+ #include "amdgpu_gem.h"
+ #include <drm/amdgpu_drm.h>
+ #include <linux/dma-buf.h>
++#include <linux/dma-fence-array.h>
+
+ static const struct dma_buf_ops amdgpu_dmabuf_ops;
+
+@@ -189,6 +190,48 @@ error:
+ return ERR_PTR(ret);
+ }
+
++static int
++__reservation_object_make_exclusive(struct reservation_object *obj)
++{
++ struct dma_fence **fences;
++ unsigned int count;
++ int r;
++
++ if (!reservation_object_get_list(obj)) /* no shared fences to convert */
++ return 0;
++
++ r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
++ if (r)
++ return r;
++
++ if (count == 0) {
++ /* Now that was unexpected. */
++ } else if (count == 1) {
++ reservation_object_add_excl_fence(obj, fences[0]);
++ dma_fence_put(fences[0]);
++ kfree(fences);
++ } else {
++ struct dma_fence_array *array;
++
++ array = dma_fence_array_create(count, fences,
++ dma_fence_context_alloc(1), 0,
++ false);
++ if (!array)
++ goto err_fences_put;
++
++ reservation_object_add_excl_fence(obj, &array->base);
++ dma_fence_put(&array->base);
++ }
++
++ return 0;
++
++err_fences_put:
++ while (count--)
++ dma_fence_put(fences[count]);
++ kfree(fences);
++ return -ENOMEM;
++}
++
+ /**
+ * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
+ * @dma_buf: Shared DMA buffer
+@@ -220,16 +263,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+
+ if (attach->dev->driver != adev->dev->driver) {
+ /*
+- * Wait for all shared fences to complete before we switch to future
+- * use of exclusive fence on this prime shared bo.
++ * We only create shared fences for internal use, but importers
++ * of the dmabuf rely on exclusive fences for implicitly
++ * tracking write hazards. As any of the current fences may
++ * correspond to a write, we need to convert all existing
++ * fences on the reservation object into a single exclusive
++ * fence.
+ */
+- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+- true, false,
+- MAX_SCHEDULE_TIMEOUT);
+- if (unlikely(r < 0)) {
+- DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
++ r = __reservation_object_make_exclusive(bo->tbo.resv);
++ if (r)
+ goto error_unreserve;
+- }
+ }
+
+ /* pin buffer into GTT */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 62df4bd0a0fc..16c83155ef5c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -3405,14 +3405,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+ struct amdgpu_task_info *task_info)
+ {
+ struct amdgpu_vm *vm;
++ unsigned long flags;
+
+- spin_lock(&adev->vm_manager.pasid_lock);
++ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm)
+ *task_info = vm->task_info;
+
+- spin_unlock(&adev->vm_manager.pasid_lock);
++ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+index f8cee95d61cc..7d5cbadbe1cb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+@@ -92,7 +92,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
+ static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+ bool enable)
+ {
++ u32 tmp = 0;
+
++ if (enable) {
++ tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
++ REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
++ REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
++
++ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
++ lower_32_bits(adev->doorbell.base));
++ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
++ upper_32_bits(adev->doorbell.base));
++ }
++
++ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
+ }
+
+ static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 4cc0dcb1a187..825d1cae85ab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -705,11 +705,13 @@ static int soc15_common_early_init(void *handle)
+ break;
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 0x8)
+- adev->external_rev_id = adev->rev_id + 0x81;
++ adev->external_rev_id = adev->rev_id + 0x79;
+ else if (adev->pdev->device == 0x15d8)
+ adev->external_rev_id = adev->rev_id + 0x41;
++ else if (adev->rev_id == 1)
++ adev->external_rev_id = adev->rev_id + 0x20;
+ else
+- adev->external_rev_id = 0x1;
++ adev->external_rev_id = adev->rev_id + 0x01;
+
+ if (adev->rev_id >= 0x8) {
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
+index 00a9c2ab9e6c..64fb788b6647 100644
+--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
++++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
+@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll)
+
+ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
+ {
+- struct dsi_data *dsi = p;
++ struct dsi_data *dsi = s->private;
+ struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
+ enum dss_clk_source dispc_clk_src, dsi_clk_src;
+ int dsi_module = dsi->module_id;
+@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
+ #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
+ {
+- struct dsi_data *dsi = p;
++ struct dsi_data *dsi = s->private;
+ unsigned long flags;
+ struct dsi_irq_stats stats;
+
+@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
+
+ static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
+ {
+- struct dsi_data *dsi = p;
++ struct dsi_data *dsi = s->private;
+
+ if (dsi_runtime_get(dsi))
+ return 0;
+@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
+ dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
++ /*
++ * HACK: These flags should be handled through the omap_dss_device bus
++ * flags, but this will only be possible when the DSI encoder will be
++ * converted to the omapdrm-managed encoder model.
++ */
++ dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
++ dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
++ dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
++ dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
++ dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
++ dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(&dsi->output, &dsi->vm);
+
+@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
+
+ snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
+ dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
+- dsi_dump_dsi_regs, &dsi);
++ dsi_dump_dsi_regs, dsi);
+ #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
+ dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
+- dsi_dump_dsi_irqs, &dsi);
++ dsi_dump_dsi_irqs, dsi);
+ #endif
+ snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
+ dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
+- dsi_dump_dsi_clocks, &dsi);
++ dsi_dump_dsi_clocks, dsi);
+
+ return 0;
+ }
+@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
+ dss_debugfs_remove_file(dsi->debugfs.irqs);
+ dss_debugfs_remove_file(dsi->debugfs.regs);
+
+- of_platform_depopulate(dev);
+-
+ WARN_ON(dsi->scp_clk_refcount > 0);
+
+ dss_pll_unregister(&dsi->pll);
+@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev)
+
+ dsi_uninit_output(dsi);
+
++ of_platform_depopulate(&pdev->dev);
++
+ pm_runtime_disable(&pdev->dev);
+
+ if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
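The omapdrm hunks above fix a classic debugfs mistake: the dump callbacks were handed `&dsi` — the address of a stack variable in dsi_bind() — instead of the dsi_data pointer itself, and then cast the opaque argument rather than reading it back through the seq_file's private field. A minimal sketch of the corrected pattern, using the generic debugfs/seq_file API rather than the omapdss-specific dss_debugfs_create_file() helper (my_data and my_show are hypothetical names):

static int my_show(struct seq_file *s, void *unused)
{
	struct my_data *md = s->private;	/* set at registration, not a stack address */

	seq_printf(s, "state=%u\n", md->state);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_show);

/* Registration passes the long-lived object itself: */
debugfs_create_file("my_regs", 0444, parent, md, &my_show_fops);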
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index d587779a80b4..a97294ac96d5 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
+ u16 data_offset, size;
+ u8 frev, crev;
+ struct ci_power_info *pi;
+- enum pci_bus_speed speed_cap;
++ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
+ struct pci_dev *root = rdev->pdev->bus->self;
+ int ret;
+
+@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
+ return -ENOMEM;
+ rdev->pm.dpm.priv = pi;
+
+- speed_cap = pcie_get_speed_cap(root);
++ if (!pci_is_root_bus(rdev->pdev->bus))
++ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
+ pi->sys_pcie_mask = 0;
+ } else {
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 8fb60b3af015..0a785ef0ab66 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
+ struct ni_power_info *ni_pi;
+ struct si_power_info *si_pi;
+ struct atom_clock_dividers dividers;
+- enum pci_bus_speed speed_cap;
++ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
+ struct pci_dev *root = rdev->pdev->bus->self;
+ int ret;
+
+@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
+ eg_pi = &ni_pi->eg;
+ pi = &eg_pi->rv7xx;
+
+- speed_cap = pcie_get_speed_cap(root);
++ if (!pci_is_root_bus(rdev->pdev->bus))
++ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
+ si_pi->sys_pcie_mask = 0;
+ } else {
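Both radeon hunks guard against the same crash: for a GPU sitting directly on a root bus, rdev->pdev->bus->self is NULL, so passing it to pcie_get_speed_cap() dereferences a NULL pointer. Initializing speed_cap to PCI_SPEED_UNKNOWN and querying only off a non-root bus lets the existing UNKNOWN fallback absorb that case. A condensed sketch of the shape (pdev stands in for the GPU's pci_dev; sys_pcie_mask follows the driver):

enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;

if (!pci_is_root_bus(pdev->bus))	/* bus->self is NULL on a root bus */
	speed_cap = pcie_get_speed_cap(pdev->bus->self);

if (speed_cap == PCI_SPEED_UNKNOWN)
	sys_pcie_mask = 0;		/* conservative default, as before */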
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+index f949287d926c..4e0562aa2cc9 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+@@ -760,6 +760,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
+ return PTR_ERR(tcon->sclk0);
+ }
+ }
++ clk_prepare_enable(tcon->sclk0);
+
+ if (tcon->quirks->has_channel_1) {
+ tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
+@@ -774,6 +775,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
+
+ static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
+ {
++ clk_disable_unprepare(tcon->sclk0);
+ clk_disable_unprepare(tcon->clk);
+ }
+
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index b1086bfb0465..cd9c65f3d404 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -1500,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-#ifdef CONFIG_PM
+-static int omap_i2c_runtime_suspend(struct device *dev)
++static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
+ {
+ struct omap_i2c_dev *omap = dev_get_drvdata(dev);
+
+@@ -1527,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev)
+ return 0;
+ }
+
+-static int omap_i2c_runtime_resume(struct device *dev)
++static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
+ {
+ struct omap_i2c_dev *omap = dev_get_drvdata(dev);
+
+@@ -1542,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev)
+ }
+
+ static const struct dev_pm_ops omap_i2c_pm_ops = {
++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
++ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
+ omap_i2c_runtime_resume, NULL)
+ };
+-#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+-#else
+-#define OMAP_I2C_PM_OPS NULL
+-#endif /* CONFIG_PM */
+
+ static struct platform_driver omap_i2c_driver = {
+ .probe = omap_i2c_probe,
+ .remove = omap_i2c_remove,
+ .driver = {
+ .name = "omap_i2c",
+- .pm = OMAP_I2C_PM_OPS,
++ .pm = &omap_i2c_pm_ops,
+ .of_match_table = of_match_ptr(omap_i2c_of_match),
+ },
+ };
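The i2c-omap hunk replaces the CONFIG_PM #ifdef block (and the OMAP_I2C_PM_OPS indirection) with the now-preferred pattern: mark the callbacks __maybe_unused and reference the ops table unconditionally. SET_RUNTIME_PM_OPS and SET_NOIRQ_SYSTEM_SLEEP_PM_OPS expand to nothing when power management is compiled out, and __maybe_unused silences the resulting "defined but not used" warnings. A sketch for a hypothetical driver:

static int __maybe_unused my_runtime_suspend(struct device *dev)
{
	/* gate clocks, save context */
	return 0;
}

static int __maybe_unused my_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore context */
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};

/* .pm = &my_pm_ops is then set unconditionally in the platform_driver. */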
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index 676c1fd1119d..7ef385db52c3 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -356,6 +356,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
+ umem->writable = 1;
+ umem->is_odp = 1;
+ odp_data->per_mm = per_mm;
++ umem->owning_mm = per_mm->mm;
++ mmgrab(umem->owning_mm);
+
+ mutex_init(&odp_data->umem_mutex);
+ init_completion(&odp_data->notifier_completion);
+@@ -388,6 +390,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
+ out_page_list:
+ vfree(odp_data->page_list);
+ out_odp_data:
++ mmdrop(umem->owning_mm);
+ kfree(odp_data);
+ return ERR_PTR(ret);
+ }
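The umem_odp hunks pin the owning mm for implicit ODP children: mmgrab() takes a reference on the mm_struct itself (unlike mmget(), which also pins the address space), so per_mm->mm cannot be freed out from under the umem, and the error path releases it with a matching mmdrop(). The rule being applied, as a hedged sketch (take_mm and setup_tables are hypothetical):

static int take_mm(struct my_umem *umem, struct mm_struct *mm)
{
	umem->owning_mm = mm;
	mmgrab(umem->owning_mm);	/* pin the mm_struct, not the address space */

	if (setup_tables(umem)) {
		mmdrop(umem->owning_mm);	/* unwind: one drop per grab */
		return -ENOMEM;
	}
	return 0;
}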
+diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
+index 4baa8f4d49de..46bf74375ea6 100644
+--- a/drivers/infiniband/hw/hfi1/ud.c
++++ b/drivers/infiniband/hw/hfi1/ud.c
+@@ -980,7 +980,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
+ opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+ wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+- tlen -= sizeof(u32);
+ } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+ wc.ex.imm_data = 0;
+ wc.wc_flags = 0;
+diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
+index 4d4c31ea4e2d..90268b838d4e 100644
+--- a/drivers/infiniband/hw/qib/qib_ud.c
++++ b/drivers/infiniband/hw/qib/qib_ud.c
+@@ -513,7 +513,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
+ opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+ wc.ex.imm_data = ohdr->u.ud.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+- tlen -= sizeof(u32);
+ } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+ wc.ex.imm_data = 0;
+ wc.wc_flags = 0;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
+index 1da119d901a9..73e808c1e6ad 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib.h
++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
+@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
+ struct list_head list;
+ struct net_device *dev;
+ struct ipoib_neigh *neigh;
+- struct ipoib_path *path;
+ struct ipoib_tx_buf *tx_ring;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 0428e01e8f69..aa9dcfc36cd3 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
+
+ neigh->cm = tx;
+ tx->neigh = neigh;
+- tx->path = path;
+ tx->dev = dev;
+ list_add(&tx->list, &priv->cm.start_list);
+ set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
+@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
+ neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ goto free_neigh;
+ }
+- memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
++ memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 225ae6980182..628ef617bb2f 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1337,6 +1337,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
++ { "ELAN0601", 0 },
+ { "ELAN0602", 0 },
+ { "ELAN0605", 0 },
+ { "ELAN0608", 0 },
+diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
+index 38bfaca48eab..150f9eecaca7 100644
+--- a/drivers/input/tablet/wacom_serial4.c
++++ b/drivers/input/tablet/wacom_serial4.c
+@@ -187,6 +187,7 @@ enum {
+ MODEL_DIGITIZER_II = 0x5544, /* UD */
+ MODEL_GRAPHIRE = 0x4554, /* ET */
+ MODEL_PENPARTNER = 0x4354, /* CT */
++ MODEL_ARTPAD_II = 0x4B54, /* KT */
+ };
+
+ static void wacom_handle_model_response(struct wacom *wacom)
+@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
+ wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
+ break;
+
++ case MODEL_ARTPAD_II:
+ case MODEL_DIGITIZER_II:
+ wacom->dev->name = "Wacom Digitizer II";
+ wacom->dev->id.version = MODEL_DIGITIZER_II;
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 325f3bad118b..4d2c5d4f586f 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1929,16 +1929,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
+
+ static void do_detach(struct iommu_dev_data *dev_data)
+ {
++ struct protection_domain *domain = dev_data->domain;
+ struct amd_iommu *iommu;
+ u16 alias;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+ alias = dev_data->alias;
+
+- /* decrease reference counters */
+- dev_data->domain->dev_iommu[iommu->index] -= 1;
+- dev_data->domain->dev_cnt -= 1;
+-
+ /* Update data structures */
+ dev_data->domain = NULL;
+ list_del(&dev_data->list);
+@@ -1948,6 +1945,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
+
+ /* Flush the DTE entry */
+ device_flush_dte(dev_data);
++
++ /* Flush IOTLB */
++ domain_flush_tlb_pde(domain);
++
++ /* Wait for the flushes to finish */
++ domain_flush_complete(domain);
++
++ /* decrease reference counters - needs to happen after the flushes */
++ domain->dev_iommu[iommu->index] -= 1;
++ domain->dev_cnt -= 1;
+ }
+
+ /*
+@@ -2555,13 +2562,13 @@ out_unmap:
+ bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
+ iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
+
+- if (--mapped_pages)
++ if (--mapped_pages == 0)
+ goto out_free_iova;
+ }
+ }
+
+ out_free_iova:
+- free_iova_fast(&dma_dom->iovad, address, npages);
++ free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
+
+ out_err:
+ return 0;
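The amd_iommu out_unmap path had two independent bugs: `if (--mapped_pages)` bailed out of the unwind loop while the counter was still non-zero, i.e. after unmapping a single page, where the intent was to stop once it reaches zero; and free_iova_fast() takes an IOVA page frame number, not a bus address, hence the `address >> PAGE_SHIFT`. A compressed sketch of the corrected unwind (names follow the driver; the loop bounds are illustrative, the real code walks a scatterlist):

for (i = 0; i < nents; i++) {
	iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
	if (--mapped_pages == 0)	/* not: if (--mapped_pages) */
		break;
}
free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);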
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 350f999d205b..c3aba3fc818d 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1586,6 +1586,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
+ nr_irqs /= 2;
+ } while (nr_irqs > 0);
+
++ if (!nr_irqs)
++ err = -ENOSPC;
++
+ if (err)
+ goto out;
+
+@@ -2065,6 +2068,29 @@ static int __init allocate_lpi_tables(void)
+ return 0;
+ }
+
++static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
++{
++ u32 count = 1000000; /* 1s! */
++ bool clean;
++ u64 val;
++
++ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
++ val &= ~GICR_VPENDBASER_Valid;
++ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
++
++ do {
++ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
++ clean = !(val & GICR_VPENDBASER_Dirty);
++ if (!clean) {
++ count--;
++ cpu_relax();
++ udelay(1);
++ }
++ } while (!clean && count);
++
++ return val;
++}
++
+ static void its_cpu_init_lpis(void)
+ {
+ void __iomem *rbase = gic_data_rdist_rd_base();
+@@ -2150,6 +2176,30 @@ static void its_cpu_init_lpis(void)
+ val |= GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
++ if (gic_rdists->has_vlpis) {
++ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
++
++ /*
++	 * It's possible for a CPU to receive VLPIs before it is
++	 * scheduled as a vPE, especially for the first CPU, and a
++	 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
++	 * out of range and dropped by the GIC.
++	 * So we initialize IDbits to a known value to avoid dropped VLPIs.
++ */
++ val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
++ pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
++ smp_processor_id(), val);
++ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
++
++ /*
++	 * Also clear the Valid bit of GICR_VPENDBASER, in case stale
++	 * programming was left behind and could corrupt memory.
++ */
++ val = its_clear_vpend_valid(vlpi_base);
++ WARN_ON(val & GICR_VPENDBASER_Dirty);
++ }
++
+ /* Make sure the GIC has seen the above */
+ dsb(sy);
+ out:
+@@ -2776,26 +2826,11 @@ static void its_vpe_schedule(struct its_vpe *vpe)
+ static void its_vpe_deschedule(struct its_vpe *vpe)
+ {
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+- u32 count = 1000000; /* 1s! */
+- bool clean;
+ u64 val;
+
+- /* We're being scheduled out */
+- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+- val &= ~GICR_VPENDBASER_Valid;
+- gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+-
+- do {
+- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+- clean = !(val & GICR_VPENDBASER_Dirty);
+- if (!clean) {
+- count--;
+- cpu_relax();
+- udelay(1);
+- }
+- } while (!clean && count);
++ val = its_clear_vpend_valid(vlpi_base);
+
+- if (unlikely(!clean && !count)) {
++ if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+ pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+ vpe->idai = false;
+ vpe->pending_last = true;
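The ITS change factors the deschedule-time poll loop into its_clear_vpend_valid() so the new CPU-init path can reuse it: clear GICR_VPENDBASER.Valid, busy-wait with a bounded timeout for the Dirty bit to drop, and return the last value so callers decide what a still-dirty result means. The generic shape of such a helper, as a hedged sketch (CTRL_VALID/CTRL_DIRTY and the plain readq/writeq accessors are placeholders, not the GIC definitions):

static u64 clear_valid_and_wait(void __iomem *reg)
{
	u32 count = 1000000;		/* ~1s at 1us per poll */
	u64 val = readq_relaxed(reg);

	val &= ~CTRL_VALID;
	writeq_relaxed(val, reg);

	do {
		val = readq_relaxed(reg);
		if (!(val & CTRL_DIRTY))
			break;
		cpu_relax();
		udelay(1);
	} while (--count);

	return val;			/* caller checks CTRL_DIRTY */
}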
+diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
+index 25f32e1d7764..3496b61a312a 100644
+--- a/drivers/irqchip/irq-mmp.c
++++ b/drivers/irqchip/irq-mmp.c
+@@ -34,6 +34,9 @@
+ #define SEL_INT_PENDING (1 << 6)
+ #define SEL_INT_NUM_MASK 0x3f
+
++#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
++#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
++
+ struct icu_chip_data {
+ int nr_irqs;
+ unsigned int virq_base;
+@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = {
+ static const struct mmp_intc_conf mmp2_conf = {
+ .conf_enable = 0x20,
+ .conf_disable = 0x0,
+- .conf_mask = 0x7f,
++ .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
++ MMP2_ICU_INT_ROUTE_PJ4_FIQ,
+ };
+
+ static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 66a174979b3c..81745644f720 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -274,6 +274,7 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
+ unsigned int new_keycode)
+ {
+ int old_keycode = rc_map->scan[index].keycode;
++ int i;
+
+ /* Did the user wish to remove the mapping? */
+ if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) {
+@@ -288,9 +289,20 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
+ old_keycode == KEY_RESERVED ? "New" : "Replacing",
+ rc_map->scan[index].scancode, new_keycode);
+ rc_map->scan[index].keycode = new_keycode;
++ __set_bit(new_keycode, dev->input_dev->keybit);
+ }
+
+ if (old_keycode != KEY_RESERVED) {
++ /* A previous mapping was updated... */
++ __clear_bit(old_keycode, dev->input_dev->keybit);
++ /* ... but another scancode might use the same keycode */
++ for (i = 0; i < rc_map->len; i++) {
++ if (rc_map->scan[i].keycode == old_keycode) {
++ __set_bit(old_keycode, dev->input_dev->keybit);
++ break;
++ }
++ }
++
+ /* Possibly shrink the keytable, failure is not a problem */
+ ir_resize_table(dev, rc_map, GFP_ATOMIC);
+ }
+@@ -1750,7 +1762,6 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
+ set_bit(EV_REP, dev->input_dev->evbit);
+ set_bit(EV_MSC, dev->input_dev->evbit);
+ set_bit(MSC_SCAN, dev->input_dev->mscbit);
+- bitmap_fill(dev->input_dev->keybit, KEY_CNT);
+
+ /* Pointer/mouse events */
+ set_bit(EV_REL, dev->input_dev->evbit);
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 76dc3ee8ca21..5ad4f91662e6 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1065,11 +1065,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- /* Make sure the terminal type MSB is not null, otherwise it
+- * could be confused with a unit.
++ /*
++ * Reject invalid terminal types that would cause issues:
++ *
++ * - The high byte must be non-zero, otherwise it would be
++ * confused with a unit.
++ *
++ * - Bit 15 must be 0, as we use it internally as a terminal
++ * direction flag.
++ *
++ * Other unknown types are accepted.
+ */
+ type = get_unaligned_le16(&buffer[4]);
+- if ((type & 0xff00) == 0) {
++ if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
+ uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
+ "interface %d INPUT_TERMINAL %d has invalid "
+ "type 0x%04x, skipping\n", udev->devnum,
+diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
+index 0fb986ba3290..0ae723f75341 100644
+--- a/drivers/net/ethernet/altera/altera_msgdma.c
++++ b/drivers/net/ethernet/altera/altera_msgdma.c
+@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
+ & 0xffff;
+
+ if (inuse) { /* Tx FIFO is not empty */
+- ready = priv->tx_prod - priv->tx_cons - inuse - 1;
++ ready = max_t(int,
++ priv->tx_prod - priv->tx_cons - inuse - 1, 0);
+ } else {
+ /* Check for buffered last packet */
+ status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
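The msgdma fix addresses unsigned underflow: `ready` is computed from ring indices, and when the FIFO accounting transiently makes `tx_prod - tx_cons - inuse - 1` negative, the u32 subtraction wraps to a huge positive descriptor count. max_t(int, ..., 0) forces the comparison into a signed type before clamping. In miniature:

/* prod = 0, cons = 0, inuse = 1: the raw u32 expression wraps to
 * 0xfffffffe; evaluated as int it is -2, which clamps to 0.
 */
u32 ready = max_t(int, prod - cons - inuse - 1, 0);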
+diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
+index 3d45f4c92cf6..9bbaad9f3d63 100644
+--- a/drivers/net/ethernet/cadence/macb.h
++++ b/drivers/net/ethernet/cadence/macb.h
+@@ -643,6 +643,7 @@
+ #define MACB_CAPS_JUMBO 0x00000020
+ #define MACB_CAPS_GEM_HAS_PTP 0x00000040
+ #define MACB_CAPS_BD_RD_PREFETCH 0x00000080
++#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
+ #define MACB_CAPS_FIFO_MODE 0x10000000
+ #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
+ #define MACB_CAPS_SG_DISABLED 0x40000000
+@@ -1214,6 +1215,8 @@ struct macb {
+
+ int rx_bd_rd_prefetch;
+ int tx_bd_rd_prefetch;
++
++ u32 rx_intr_mask;
+ };
+
+ #ifdef CONFIG_MACB_USE_HWSTAMP
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 4c816e5a841f..73aa7969db96 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -56,8 +56,7 @@
+ /* level of occupied TX descriptors under which we wake up TX process */
+ #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
+
+-#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
+- | MACB_BIT(ISR_ROVR))
++#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
+ #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
+ | MACB_BIT(ISR_RLE) \
+ | MACB_BIT(TXERR))
+@@ -1270,7 +1269,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
+ napi_reschedule(napi);
+ } else {
+- queue_writel(queue, IER, MACB_RX_INT_FLAGS);
++ queue_writel(queue, IER, bp->rx_intr_mask);
+ }
+ }
+
+@@ -1288,7 +1287,7 @@ static void macb_hresp_error_task(unsigned long data)
+ u32 ctrl;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+- queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
++ queue_writel(queue, IDR, bp->rx_intr_mask |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+@@ -1318,7 +1317,7 @@ static void macb_hresp_error_task(unsigned long data)
+
+ /* Enable interrupts */
+ queue_writel(queue, IER,
+- MACB_RX_INT_FLAGS |
++ bp->rx_intr_mask |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+@@ -1372,14 +1371,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
+ (unsigned int)(queue - bp->queues),
+ (unsigned long)status);
+
+- if (status & MACB_RX_INT_FLAGS) {
++ if (status & bp->rx_intr_mask) {
+ /* There's no point taking any more interrupts
+ * until we have processed the buffers. The
+ * scheduling call may fail if the poll routine
+ * is already scheduled, so disable interrupts
+ * now.
+ */
+- queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
++ queue_writel(queue, IDR, bp->rx_intr_mask);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
+
+@@ -1412,8 +1411,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
+ /* There is a hardware issue under heavy load where DMA can
+ * stop, this causes endless "used buffer descriptor read"
+ * interrupts but it can be cleared by re-enabling RX. See
+- * the at91 manual, section 41.3.1 or the Zynq manual
+- * section 16.7.4 for details.
++ * the at91rm9200 manual, section 41.3.1 or the Zynq manual
++ * section 16.7.4 for details. RXUBR is only enabled for
++ * these two versions.
+ */
+ if (status & MACB_BIT(RXUBR)) {
+ ctrl = macb_readl(bp, NCR);
+@@ -2263,7 +2263,7 @@ static void macb_init_hw(struct macb *bp)
+
+ /* Enable interrupts */
+ queue_writel(queue, IER,
+- MACB_RX_INT_FLAGS |
++ bp->rx_intr_mask |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+@@ -3911,6 +3911,7 @@ static const struct macb_config sama5d4_config = {
+ };
+
+ static const struct macb_config emac_config = {
++ .caps = MACB_CAPS_NEEDS_RSTONUBR,
+ .clk_init = at91ether_clk_init,
+ .init = at91ether_init,
+ };
+@@ -3932,7 +3933,8 @@ static const struct macb_config zynqmp_config = {
+ };
+
+ static const struct macb_config zynq_config = {
+- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
++ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
++ MACB_CAPS_NEEDS_RSTONUBR,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+@@ -4087,6 +4089,10 @@ static int macb_probe(struct platform_device *pdev)
+ macb_dma_desc_get_size(bp);
+ }
+
++ bp->rx_intr_mask = MACB_RX_INT_FLAGS;
++ if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
++ bp->rx_intr_mask |= MACB_BIT(RXUBR);
++
+ mac = of_get_mac_address(np);
+ if (mac) {
+ ether_addr_copy(bp->dev->dev_addr, mac);
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index 6242249c9f4c..b043370c2685 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -2419,6 +2419,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
+ out_notify_fail:
+ (void)cancel_work_sync(&priv->service_task);
+ out_read_prop_fail:
++ /* safe for ACPI FW */
++ of_node_put(to_of_node(priv->fwnode));
+ free_netdev(ndev);
+ return ret;
+ }
+@@ -2448,6 +2450,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
+ set_bit(NIC_STATE_REMOVING, &priv->state);
+ (void)cancel_work_sync(&priv->service_task);
+
++ /* safe for ACPI FW */
++ of_node_put(to_of_node(priv->fwnode));
++
+ free_netdev(ndev);
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+index 774beda040a1..e2710ff48fb0 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
+ */
+ static int hns_nic_nway_reset(struct net_device *netdev)
+ {
+- int ret = 0;
+ struct phy_device *phy = netdev->phydev;
+
+- if (netif_running(netdev)) {
+- /* if autoneg is disabled, don't restart auto-negotiation */
+- if (phy && phy->autoneg == AUTONEG_ENABLE)
+- ret = genphy_restart_aneg(phy);
+- }
++ if (!netif_running(netdev))
++ return 0;
+
+- return ret;
++ if (!phy)
++ return -EOPNOTSUPP;
++
++ if (phy->autoneg != AUTONEG_ENABLE)
++ return -EINVAL;
++
++ return genphy_restart_aneg(phy);
+ }
+
+ static u32
+diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
+index 017e08452d8c..baf5cc251f32 100644
+--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
+@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+ }
+
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+- MDIO_C45_WRITE_ADDR, phy_id, devad);
++ MDIO_C45_READ, phy_id, devad);
+ }
+
+ 	/* Step 5: waiting for MDIO_COMMAND_REG's mdio_start == 0 */
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index 88a8576ca9ce..f81abed29a76 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -480,19 +480,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
+
+ /* get pq index according to PQ_FLAGS */
+ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
+- u32 pq_flags)
++ unsigned long pq_flags)
+ {
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ /* Can't have multiple flags set here */
+- if (bitmap_weight((unsigned long *)&pq_flags,
++ if (bitmap_weight(&pq_flags,
+ sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
+- DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
++ DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
+ goto err;
+ }
+
+ if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
+- DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
++ DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
+ goto err;
+ }
+
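The qed_dev change is a word-size fix: bitmap_weight() operates on unsigned long words, so casting the address of a 32-bit variable makes it load a full 64-bit word on 64-bit hosts — four bytes of which are whatever sits next on the stack. Widening pq_flags to unsigned long (and the format strings to %lx) keeps the load in bounds. The hazard in miniature (illustrative values):

u32 flags = BIT(3);
/* Bad: on 64-bit this reads 8 bytes starting at a 4-byte object. */
int w_bad  = bitmap_weight((unsigned long *)&flags, 32);

/* Good: hand bitmap_weight a real unsigned long. */
unsigned long lflags = flags;
int w_good = bitmap_weight(&lflags, sizeof(lflags) * BITS_PER_BYTE);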
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 67c02ea93906..64ac95ca4df2 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
++ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
++ (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
++ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
++
+ SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ return rc;
+ }
+
++ if (p_params->update_ctl_frame_check) {
++ p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
++ p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
++ }
++
+ /* Update mcast bins for VFs, PF doesn't use this functionality */
+ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+@@ -2207,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+ u16 num_queues = 0;
+
+ /* Since the feature controls only queue-zones,
+- * make sure we have the contexts [rx, tx, xdp] to
++ * make sure we have the contexts [rx, xdp, tcs] to
+ * match.
+ */
+ for_each_hwfn(cdev, i) {
+@@ -2217,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+ u16 cids;
+
+ cids = hwfn->pf_params.eth_pf_params.num_cons;
+- num_queues += min_t(u16, l2_queues, cids / 3);
++ cids /= (2 + info->num_tc);
++ num_queues += min_t(u16, l2_queues, cids);
+ }
+
+ /* queues might theoretically be >256, but interrupts'
+@@ -2688,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+ accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+ QED_ACCEPT_MCAST_UNMATCHED;
+- accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
++ accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
++ QED_ACCEPT_MCAST_UNMATCHED;
+ } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+ accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+ accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
+index 8d80f1095d17..7127d5aaac42 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
+@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
+ struct qed_rss_params *rss_params;
+ struct qed_filter_accept_flags accept_flags;
+ struct qed_sge_tpa_params *sge_tpa_params;
++ u8 update_ctl_frame_check;
++ u8 mac_chk_en;
++ u8 ethtype_chk_en;
+ };
+
+ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index 9e728ec82c21..25f67c0d5c57 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -2441,19 +2441,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
+ {
+ struct qed_ll2_tx_pkt_info pkt;
+ const skb_frag_t *frag;
++ u8 flags = 0, nr_frags;
+ int rc = -EINVAL, i;
+ dma_addr_t mapping;
+ u16 vlan = 0;
+- u8 flags = 0;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
+ DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
+ return -EINVAL;
+ }
+
+- if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
++ /* Cache number of fragments from SKB since SKB may be freed by
++ * the completion routine after calling qed_ll2_prepare_tx_packet()
++ */
++ nr_frags = skb_shinfo(skb)->nr_frags;
++
++ if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
+- 1 + skb_shinfo(skb)->nr_frags);
++ 1 + nr_frags);
+ return -EINVAL;
+ }
+
+@@ -2475,7 +2480,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
+ }
+
+ memset(&pkt, 0, sizeof(pkt));
+- pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
++ pkt.num_of_bds = 1 + nr_frags;
+ pkt.vlan = vlan;
+ pkt.bd_flags = flags;
+ pkt.tx_dest = QED_LL2_TX_DEST_NW;
+@@ -2486,12 +2491,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
+ test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
+ pkt.remove_stag = true;
+
++	/* qed_ll2_prepare_tx_packet() may actually send the packet if
++	 * there are no fragments in the skb, in which case the completion
++	 * routine may run and free the skb; so do not dereference the skb
++	 * beyond this point unless it has fragments.
++ */
+ rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
+ &pkt, 1);
+ if (rc)
+ goto err;
+
+- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+
+ mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
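The qed_ll2 hunks enforce a general skb rule: once a packet is handed to a path whose completion can free it, nothing may be read from the skb afterward. Here qed_ll2_prepare_tx_packet() can transmit a fragment-less packet immediately, so nr_frags is snapshotted up front; touching the skb in the fragment loop stays safe only because a packet that still has fragments to post has not been completed yet. The rule, in a hedged sketch (hw_queue_tx and map_fragment are hypothetical):

u8 nr_frags = skb_shinfo(skb)->nr_frags;	/* snapshot before submission */

rc = hw_queue_tx(dev, skb);			/* may complete and free the skb */
if (rc)
	goto err;

for (i = 0; i < nr_frags; i++)			/* loop bound no longer reads the skb */
	map_fragment(dev, skb, i);		/* safe: pending frags imply not completed */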
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
+index 3157c0d99441..dae2896e1d8e 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
+@@ -380,6 +380,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
+ * @param p_hwfn
+ */
+ void qed_consq_free(struct qed_hwfn *p_hwfn);
++int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
+
+ /**
+ * @file
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
+index 0a9c5bb0fa48..a721b9348b6c 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
+@@ -402,6 +402,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
+
+ qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+
++ /* Attempt to post pending requests */
++ spin_lock_bh(&p_hwfn->p_spq->lock);
++ rc = qed_spq_pend_post(p_hwfn);
++ spin_unlock_bh(&p_hwfn->p_spq->lock);
++
+ return rc;
+ }
+
+@@ -744,7 +749,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
+ return 0;
+ }
+
+-static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
++int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+ {
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+@@ -882,7 +887,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_spq_entry *tmp;
+ struct qed_spq_entry *found = NULL;
+- int rc;
+
+ if (!p_hwfn)
+ return -EINVAL;
+@@ -940,12 +944,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ */
+ qed_spq_return_entry(p_hwfn, found);
+
+- /* Attempt to post pending requests */
+- spin_lock_bh(&p_spq->lock);
+- rc = qed_spq_pend_post(p_hwfn);
+- spin_unlock_bh(&p_spq->lock);
+-
+- return rc;
++ return 0;
+ }
+
+ int qed_consq_alloc(struct qed_hwfn *p_hwfn)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+index ca6290fa0f30..71a7af134dd8 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
+ params.vport_id = vf->vport_id;
+ params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+ params.mtu = vf->mtu;
+- params.check_mac = true;
++
++ /* Non trusted VFs should enable control frame filtering */
++ params.check_mac = !vf->p_vf_info.is_trusted_configured;
+
+ rc = qed_sp_eth_vport_start(p_hwfn, &params);
+ if (rc) {
+@@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+
++ params.update_ctl_frame_check = 1;
++ params.mac_chk_en = !vf_info->is_trusted_configured;
++
+ if (vf_info->rx_accept_mode & mask) {
+ flags->update_rx_mode_config = 1;
+ flags->rx_accept_filter = vf_info->rx_accept_mode;
+@@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
+ }
+
+ if (flags->update_rx_mode_config ||
+- flags->update_tx_mode_config)
++ flags->update_tx_mode_config ||
++ params.update_ctl_frame_check)
+ qed_sp_vport_update(hwfn, &params,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
+index b6cccf44bf40..5dda547772c1 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
+@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+ struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct vf_pf_resc_request *p_resc;
++ u8 retry_cnt = VF_ACQUIRE_THRESH;
+ bool resources_acquired = false;
+ struct vfpf_acquire_tlv *req;
+ int rc = 0, attempts = 0;
+@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+
+ /* send acquire request */
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
++
++ /* Re-try acquire in case of vf-pf hw channel timeout */
++ if (retry_cnt && rc == -EBUSY) {
++ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
++ "VF retrying to acquire due to VPC timeout\n");
++ retry_cnt--;
++ continue;
++ }
++
+ if (rc)
+ goto exit;
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
+index de98a974673b..4b875f652ecd 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede.h
++++ b/drivers/net/ethernet/qlogic/qede/qede.h
+@@ -489,6 +489,9 @@ struct qede_reload_args {
+
+ /* Datapath functions definition */
+ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
++u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
++ struct net_device *sb_dev,
++ select_queue_fallback_t fallback);
+ netdev_features_t qede_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+index 1a78027de071..a96da16f3404 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ return NETDEV_TX_OK;
+ }
+
++u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
++ struct net_device *sb_dev,
++ select_queue_fallback_t fallback)
++{
++ struct qede_dev *edev = netdev_priv(dev);
++ int total_txq;
++
++ total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
++
++ return QEDE_TSS_COUNT(edev) ?
++ fallback(dev, skb, NULL) % total_txq : 0;
++}
++
+ /* 8B udp header + 8B base tunnel header + 32B option length */
+ #define QEDE_MAX_TUN_HDR_LEN 48
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 46d0f2eaa0c0..f3d9c40c4115 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
++ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
++ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
++ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+index 7b923362ee55..3b174eae77c1 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
+ }
+
+ ret = phy_power_on(bsp_priv, true);
+- if (ret)
++ if (ret) {
++ gmac_clk_enable(bsp_priv, false);
+ return ret;
++ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index 9caf79ba5ef1..4d5fb4b51cc4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -719,8 +719,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
+ {
+ unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
+
+- if (!clk)
+- return 0;
++ if (!clk) {
++ clk = priv->plat->clk_ref_rate;
++ if (!clk)
++ return 0;
++ }
+
+ return (usec * (clk / 1000000)) / 256;
+ }
+@@ -729,8 +732,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
+ {
+ unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
+
+- if (!clk)
+- return 0;
++ if (!clk) {
++ clk = priv->plat->clk_ref_rate;
++ if (!clk)
++ return 0;
++ }
+
+ return (riwt * 256) / (clk / 1000000);
+ }
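These two ethtool helpers convert between microseconds and the GMAC's RX watchdog (RIWT) units, where one unit is 256 clock cycles: riwt = usec * (clk / 10^6) / 256. With clk = 125 MHz, for example, 1024 us maps to 1024 * 125 / 256 = 500 units. The fix falls back to the platform-declared reference rate when clk_get_rate() reports 0, instead of silently disabling coalescing. A sketch of the conversion with the fallback (rates in Hz):

static u32 usec2riwt(u32 usec, unsigned long clk, unsigned long ref_rate)
{
	if (!clk) {
		clk = ref_rate;		/* platform fallback, e.g. clk_ref_rate */
		if (!clk)
			return 0;	/* rate genuinely unknown: give up */
	}
	return (usec * (clk / 1000000)) / 256;
}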
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 5d83d6a7694b..9340526d2a9a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3031,10 +3031,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ tx_q = &priv->tx_queue[queue];
+
++ if (priv->tx_path_in_lpi_mode)
++ stmmac_disable_eee_mode(priv);
++
+ /* Manage oversized TCP frames for GMAC4 device */
+ if (skb_is_gso(skb) && priv->tso) {
+- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
++ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
++ /*
++			 * There is no way to determine the number of
++			 * TSO-capable queues. Always use queue 0,
++			 * because if TSO is supported at all, at
++			 * least this queue is capable.
++ */
++ skb_set_queue_mapping(skb, 0);
++
+ return stmmac_tso_xmit(skb, dev);
++ }
+ }
+
+ if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
+@@ -3049,9 +3061,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ return NETDEV_TX_BUSY;
+ }
+
+- if (priv->tx_path_in_lpi_mode)
+- stmmac_disable_eee_mode(priv);
+-
+ entry = tx_q->cur_tx;
+ first_entry = entry;
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 59fdda67f89f..0a3b2c45549e 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ {
+ .id = WCN3990_HW_1_0_DEV_VERSION,
+ .dev_id = 0,
+- .bus = ATH10K_BUS_PCI,
++ .bus = ATH10K_BUS_SNOC,
+ .name = "wcn3990 hw1.0",
+ .continuous_frag_desc = true,
+ .tx_chain_mask = 0x7,
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index c070a9e51ebf..fae572b38416 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc)
+ ret = ath9k_eeprom_request(sc, eeprom_name);
+ if (ret)
+ return ret;
++
++ ah->ah_flags &= ~AH_USE_EEPROM;
++ ah->ah_flags |= AH_NO_EEP_SWAP;
+ }
+
+ mac = of_get_mac_address(np);
+ if (mac)
+ ether_addr_copy(common->macaddr, mac);
+
+- ah->ah_flags &= ~AH_USE_EEPROM;
+- ah->ah_flags |= AH_NO_EEP_SWAP;
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
+index bd10165d7eec..4d4b07701149 100644
+--- a/drivers/net/wireless/ti/wlcore/sdio.c
++++ b/drivers/net/wireless/ti/wlcore/sdio.c
+@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
+ }
+
+ sdio_claim_host(func);
++ /*
++	 * To guarantee that the SDIO card is power cycled, as required for
++	 * the FW programming to succeed, do a brute-force HW reset.
++ */
++ mmc_hw_reset(card->host);
++
+ sdio_enable_func(func);
+ sdio_release_host(func);
+
+@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
+ {
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
+ struct mmc_card *card = func->card;
+- int error;
+
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+
+ /* Let runtime PM know the card is powered off */
+- error = pm_runtime_put(&card->dev);
+- if (error < 0 && error != -EBUSY) {
+- dev_err(&card->dev, "%s failed: %i\n", __func__, error);
+-
+- return error;
+- }
+-
++ pm_runtime_put(&card->dev);
+ return 0;
+ }
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 5f9a5ef93969..21d7b646c73d 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1182,6 +1182,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ * effects say only one namespace is affected.
+ */
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
++ mutex_lock(&ctrl->scan_lock);
+ nvme_start_freeze(ctrl);
+ nvme_wait_freeze(ctrl);
+ }
+@@ -1210,8 +1211,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
+ */
+ if (effects & NVME_CMD_EFFECTS_LBCC)
+ nvme_update_formats(ctrl);
+- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
++ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ nvme_unfreeze(ctrl);
++ mutex_unlock(&ctrl->scan_lock);
++ }
+ if (effects & NVME_CMD_EFFECTS_CCC)
+ nvme_init_identify(ctrl);
+ if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
+@@ -3300,6 +3303,7 @@ static void nvme_scan_work(struct work_struct *work)
+ if (nvme_identify_ctrl(ctrl, &id))
+ return;
+
++ mutex_lock(&ctrl->scan_lock);
+ nn = le32_to_cpu(id->nn);
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+ !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
+@@ -3308,6 +3312,7 @@ static void nvme_scan_work(struct work_struct *work)
+ }
+ nvme_scan_ns_sequential(ctrl, nn);
+ out_free_id:
++ mutex_unlock(&ctrl->scan_lock);
+ kfree(id);
+ down_write(&ctrl->namespaces_rwsem);
+ list_sort(NULL, &ctrl->namespaces, ns_cmp);
+@@ -3550,6 +3555,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+
+ ctrl->state = NVME_CTRL_NEW;
+ spin_lock_init(&ctrl->lock);
++ mutex_init(&ctrl->scan_lock);
+ INIT_LIST_HEAD(&ctrl->namespaces);
+ init_rwsem(&ctrl->namespaces_rwsem);
+ ctrl->dev = dev;
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 6ffa99a10a60..5274881f9141 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -153,6 +153,7 @@ struct nvme_ctrl {
+ enum nvme_ctrl_state state;
+ bool identified;
+ spinlock_t lock;
++ struct mutex scan_lock;
+ const struct nvme_ctrl_ops *ops;
+ struct request_queue *admin_q;
+ struct request_queue *connect_q;
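The nvme hunks close a race between admin passthru commands that can reshape namespaces (format, attach/detach) and the background scan worker: both now serialize on the new ctrl->scan_lock, and the passthru path additionally freezes I/O queues across the command. The resulting ordering, sketched with the fields this patch introduces:

mutex_lock(&ctrl->scan_lock);		/* exclude nvme_scan_work() */
nvme_start_freeze(ctrl);		/* quiesce I/O while LBAs may change */
nvme_wait_freeze(ctrl);

/* ... issue the namespace-changing admin command ... */

nvme_unfreeze(ctrl);
mutex_unlock(&ctrl->scan_lock);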
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index c0d01048ce4d..5c58e0ffa3ac 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2280,27 +2280,18 @@ static void nvme_reset_work(struct work_struct *work)
+ if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
+ nvme_dev_disable(dev, false);
+
+- /*
+- * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
+- * initializing procedure here.
+- */
+- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
+- dev_warn(dev->ctrl.device,
+- "failed to mark controller CONNECTING\n");
+- goto out;
+- }
+-
++ mutex_lock(&dev->shutdown_lock);
+ result = nvme_pci_enable(dev);
+ if (result)
+- goto out;
++ goto out_unlock;
+
+ result = nvme_pci_configure_admin_queue(dev);
+ if (result)
+- goto out;
++ goto out_unlock;
+
+ result = nvme_alloc_admin_tags(dev);
+ if (result)
+- goto out;
++ goto out_unlock;
+
+ /*
+ * Limit the max command size to prevent iod->sg allocations going
+@@ -2308,6 +2299,17 @@ static void nvme_reset_work(struct work_struct *work)
+ */
+ dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+ dev->ctrl.max_segments = NVME_MAX_SEGS;
++ mutex_unlock(&dev->shutdown_lock);
++
++ /*
++ * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
++ * initializing procedure here.
++ */
++ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
++ dev_warn(dev->ctrl.device,
++ "failed to mark controller CONNECTING\n");
++ goto out;
++ }
+
+ result = nvme_init_identify(&dev->ctrl);
+ if (result)
+@@ -2372,6 +2374,8 @@ static void nvme_reset_work(struct work_struct *work)
+ nvme_start_ctrl(&dev->ctrl);
+ return;
+
++ out_unlock:
++ mutex_unlock(&dev->shutdown_lock);
+ out:
+ nvme_remove_dead_ctrl(dev, result);
+ }
+diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
+index 0dbcf429089f..1a8b85051b1b 100644
+--- a/drivers/pci/pcie/pme.c
++++ b/drivers/pci/pcie/pme.c
+@@ -432,31 +432,6 @@ static void pcie_pme_remove(struct pcie_device *srv)
+ kfree(get_service_data(srv));
+ }
+
+-static int pcie_pme_runtime_suspend(struct pcie_device *srv)
+-{
+- struct pcie_pme_service_data *data = get_service_data(srv);
+-
+- spin_lock_irq(&data->lock);
+- pcie_pme_interrupt_enable(srv->port, false);
+- pcie_clear_root_pme_status(srv->port);
+- data->noirq = true;
+- spin_unlock_irq(&data->lock);
+-
+- return 0;
+-}
+-
+-static int pcie_pme_runtime_resume(struct pcie_device *srv)
+-{
+- struct pcie_pme_service_data *data = get_service_data(srv);
+-
+- spin_lock_irq(&data->lock);
+- pcie_pme_interrupt_enable(srv->port, true);
+- data->noirq = false;
+- spin_unlock_irq(&data->lock);
+-
+- return 0;
+-}
+-
+ static struct pcie_port_service_driver pcie_pme_driver = {
+ .name = "pcie_pme",
+ .port_type = PCI_EXP_TYPE_ROOT_PORT,
+@@ -464,8 +439,6 @@ static struct pcie_port_service_driver pcie_pme_driver = {
+
+ .probe = pcie_pme_probe,
+ .suspend = pcie_pme_suspend,
+- .runtime_suspend = pcie_pme_runtime_suspend,
+- .runtime_resume = pcie_pme_runtime_resume,
+ .resume = pcie_pme_resume,
+ .remove = pcie_pme_remove,
+ };
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index b03481ef99a1..98905d4a79ca 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ break;
+
+ case MCP_TYPE_S18:
++ one_regmap_config =
++ devm_kmemdup(dev, &mcp23x17_regmap,
++ sizeof(struct regmap_config), GFP_KERNEL);
++ if (!one_regmap_config)
++ return -ENOMEM;
+ mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
+- &mcp23x17_regmap);
++ one_regmap_config);
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23s18";
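The mcp23s08 fix stops every MCP23S18 instance from registering the same shared, static regmap_config: devm_kmemdup() gives each chip a private, device-lifetime copy, so any per-instance adjustment to the config cannot leak into other devices. The pattern, sketched for a hypothetical template (template_regmap_config, bus_ops and ctx are placeholders):

struct regmap_config *cfg;

cfg = devm_kmemdup(dev, &template_regmap_config, sizeof(*cfg), GFP_KERNEL);
if (!cfg)
	return -ENOMEM;

/* Safe: modifications now touch only this instance's copy. */
cfg->name = dev_name(dev);
map = devm_regmap_init(dev, &bus_ops, ctx, cfg);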
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 54f6a40c75c6..0430cad6d84e 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -901,6 +901,7 @@ config TOSHIBA_WMI
+ config ACPI_CMPC
+ tristate "CMPC Laptop Extras"
+ depends on ACPI && INPUT
++ depends on BACKLIGHT_LCD_SUPPORT
+ depends on RFKILL || RFKILL=n
+ select BACKLIGHT_CLASS_DEVICE
+ help
+@@ -1124,6 +1125,7 @@ config INTEL_OAKTRAIL
+ config SAMSUNG_Q10
+ tristate "Samsung Q10 Extras"
+ depends on ACPI
++ depends on BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+ This driver provides support for backlight control on Samsung Q10
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 99af1a0a3314..8f2af450152f 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -22,6 +22,7 @@
+ #include <linux/hashtable.h>
+ #include <linux/ip.h>
+ #include <linux/refcount.h>
++#include <linux/workqueue.h>
+
+ #include <net/ipv6.h>
+ #include <net/if_inet6.h>
+@@ -790,6 +791,7 @@ struct qeth_card {
+ struct qeth_seqno seqno;
+ struct qeth_card_options options;
+
++ struct workqueue_struct *event_wq;
+ wait_queue_head_t wait_q;
+ spinlock_t mclock;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+@@ -968,7 +970,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[];
+ extern const struct attribute_group qeth_device_attr_group;
+ extern const struct attribute_group qeth_device_blkt_group;
+ extern const struct device_type qeth_generic_devtype;
+-extern struct workqueue_struct *qeth_wq;
+
+ int qeth_card_hw_is_reachable(struct qeth_card *);
+ const char *qeth_get_cardname_short(struct qeth_card *);
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 0c9a5250dd93..ebbc3ad504f9 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -76,8 +76,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
+ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
+ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
+
+-struct workqueue_struct *qeth_wq;
+-EXPORT_SYMBOL_GPL(qeth_wq);
++static struct workqueue_struct *qeth_wq;
+
+ int qeth_card_hw_is_reachable(struct qeth_card *card)
+ {
+@@ -568,6 +567,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
+ QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
+ rc, CARD_DEVID(card));
+ atomic_set(&channel->irq_pending, 0);
++ qeth_release_buffer(channel, iob);
+ card->read_or_write_problem = 1;
+ qeth_schedule_recovery(card);
+ wake_up(&card->wait_q);
+@@ -1129,6 +1129,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ rc = qeth_get_problem(card, cdev, irb);
+ if (rc) {
+ card->read_or_write_problem = 1;
++ if (iob)
++ qeth_release_buffer(iob->channel, iob);
+ qeth_clear_ipacmd_list(card);
+ qeth_schedule_recovery(card);
+ goto out;
+@@ -1468,6 +1470,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
+ CARD_RDEV(card) = gdev->cdev[0];
+ CARD_WDEV(card) = gdev->cdev[1];
+ CARD_DDEV(card) = gdev->cdev[2];
++
++ card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
++ if (!card->event_wq)
++ goto out_wq;
+ if (qeth_setup_channel(&card->read, true))
+ goto out_ip;
+ if (qeth_setup_channel(&card->write, true))
+@@ -1483,6 +1489,8 @@ out_data:
+ out_channel:
+ qeth_clean_channel(&card->read);
+ out_ip:
++ destroy_workqueue(card->event_wq);
++out_wq:
+ dev_set_drvdata(&gdev->dev, NULL);
+ kfree(card);
+ out:
+@@ -1811,6 +1819,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card,
+ QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ atomic_set(&channel->irq_pending, 0);
++ qeth_release_buffer(channel, iob);
+ wake_up(&card->wait_q);
+ return rc;
+ }
+@@ -1880,6 +1889,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
+ rc);
+ QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ atomic_set(&channel->irq_pending, 0);
++ qeth_release_buffer(channel, iob);
+ wake_up(&card->wait_q);
+ return rc;
+ }
+@@ -2060,6 +2070,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ }
+ reply = qeth_alloc_reply(card);
+ if (!reply) {
++ qeth_release_buffer(channel, iob);
+ return -ENOMEM;
+ }
+ reply->callback = reply_cb;
+@@ -2391,11 +2402,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
+ return 0;
+ }
+
+-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
++static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
+ {
+ if (!q)
+ return;
+
++ qeth_clear_outq_buffers(q, 1);
+ qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+ kfree(q);
+ }
+@@ -2469,10 +2481,8 @@ out_freeoutqbufs:
+ card->qdio.out_qs[i]->bufs[j] = NULL;
+ }
+ out_freeoutq:
+- while (i > 0) {
+- qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
+- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+- }
++ while (i > 0)
++ qeth_free_output_queue(card->qdio.out_qs[--i]);
+ kfree(card->qdio.out_qs);
+ card->qdio.out_qs = NULL;
+ out_freepool:
+@@ -2505,10 +2515,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
+ qeth_free_buffer_pool(card);
+ /* free outbound qdio_qs */
+ if (card->qdio.out_qs) {
+- for (i = 0; i < card->qdio.no_out_queues; ++i) {
+- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+- qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
+- }
++ for (i = 0; i < card->qdio.no_out_queues; i++)
++ qeth_free_output_queue(card->qdio.out_qs[i]);
+ kfree(card->qdio.out_qs);
+ card->qdio.out_qs = NULL;
+ }
+@@ -5035,6 +5043,7 @@ static void qeth_core_free_card(struct qeth_card *card)
+ qeth_clean_channel(&card->read);
+ qeth_clean_channel(&card->write);
+ qeth_clean_channel(&card->data);
++ destroy_workqueue(card->event_wq);
+ qeth_free_qdio_buffers(card);
+ unregister_service_level(&card->qeth_service_level);
+ dev_set_drvdata(&card->gdev->dev, NULL);
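The qeth series replaces the driver-global qeth_wq with a per-card ordered workqueue: bridge-port events queue to card->event_wq, each card can flush its own events on stop, and teardown destroys the queue with the card. The lifecycle, in brief (names from the patch):

/* probe */
card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
if (!card->event_wq)
	goto out_wq;

/* event delivery: serialized per card, not globally */
queue_work(card->event_wq, &data->worker);

/* stop: drain anything in flight */
flush_workqueue(card->event_wq);

/* teardown */
destroy_workqueue(card->event_wq);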
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 8d3601891c62..a6c55cbf3d69 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -391,6 +391,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
+ qeth_clear_cmd_buffers(&card->read);
+ qeth_clear_cmd_buffers(&card->write);
+ }
++
++ flush_workqueue(card->event_wq);
+ }
+
+ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
+@@ -823,6 +825,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
+
+ if (cgdev->state == CCWGROUP_ONLINE)
+ qeth_l2_set_offline(cgdev);
++
++ cancel_work_sync(&card->close_dev_work);
+ if (qeth_netdev_is_registered(card->dev))
+ unregister_netdev(card->dev);
+ }
+@@ -1453,7 +1457,7 @@ static void qeth_bridge_state_change(struct qeth_card *card,
+ data->card = card;
+ memcpy(&data->qports, qports,
+ sizeof(struct qeth_sbp_state_change) + extrasize);
+- queue_work(qeth_wq, &data->worker);
++ queue_work(card->event_wq, &data->worker);
+ }
+
+ struct qeth_bridge_host_data {
+@@ -1525,7 +1529,7 @@ static void qeth_bridge_host_event(struct qeth_card *card,
+ data->card = card;
+ memcpy(&data->hostevs, hostevs,
+ sizeof(struct qeth_ipacmd_addr_change) + extrasize);
+- queue_work(qeth_wq, &data->worker);
++ queue_work(card->event_wq, &data->worker);
+ }
+
+ /* SETBRIDGEPORT support; sending commands */
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index f08b745c2007..29a2408b9282 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -1436,6 +1436,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
+ qeth_clear_cmd_buffers(&card->read);
+ qeth_clear_cmd_buffers(&card->write);
+ }
++
++ flush_workqueue(card->event_wq);
+ }
+
+ /*
+@@ -2428,6 +2430,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
+ if (cgdev->state == CCWGROUP_ONLINE)
+ qeth_l3_set_offline(cgdev);
+
++ cancel_work_sync(&card->close_dev_work);
+ if (qeth_netdev_is_registered(card->dev))
+ unregister_netdev(card->dev);
+ qeth_l3_clear_ip_htable(card, 0);
+diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
+index 6be77b3aa8a5..ac79f2088b31 100644
+--- a/drivers/scsi/53c700.c
++++ b/drivers/scsi/53c700.c
+@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
+ if(tpnt->sdev_attrs == NULL)
+ tpnt->sdev_attrs = NCR_700_dev_attrs;
+
+- memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
++ memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
+ GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
+ if(memory == NULL) {
+ printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 1e77d96a18f2..055fe61ea539 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -1304,8 +1304,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+ ADD : DELETE;
+ break;
+ }
+- case AifBuManagerEvent:
+- aac_handle_aif_bu(dev, aifcmd);
++ break;
++ case AifBuManagerEvent:
++ aac_handle_aif_bu(dev, aifcmd);
+ break;
+ }
+
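The aacraid hunk above restores the `break` that was missing after the JBOD add/delete case, which previously fell straight through into the battery-manager event handler. A minimal, compilable sketch of that fallthrough hazard, with hypothetical event names standing in for the AIF constants:

#include <stdio.h>

enum aif_event { EV_JBOD_CHANGE, EV_BATTERY, EV_OTHER };

static void handle(enum aif_event ev)
{
    switch (ev) {
    case EV_JBOD_CHANGE:
        printf("rescan JBOD devices\n");
        break;  /* without this, the battery path below also runs */
    case EV_BATTERY:
        printf("handle battery-manager event\n");
        break;
    default:
        break;
    }
}

int main(void)
{
    handle(EV_JBOD_CHANGE); /* prints one line, not two */
    return 0;
}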
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
+index 350257c13a5b..bc9f2a2365f4 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
+@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
+ return NULL;
+ }
+
++ cmgr->hba = hba;
+ cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
+ GFP_KERNEL);
+ if (!cmgr->free_list) {
+@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
+ goto mem_err;
+ }
+
+- cmgr->hba = hba;
+ cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
+
+ for (i = 0; i < arr_sz; i++) {
+@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
+
+ /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
+ mem_size = num_ios * sizeof(struct io_bdt *);
+- cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
++ cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool) {
+ printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
+ goto mem_err;
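The bnx2fc hunks apply two error-path rules: assign `cmgr->hba` before the first `goto mem_err` (the cleanup code dereferences it), and zero-allocate the io_bdt pointer table so cleanup can safely walk a partially populated array. A hedged userspace sketch of the same discipline, with invented names:

#include <stdlib.h>

struct mgr { void *owner; void **slots; int nslots; };

static struct mgr *mgr_alloc(void *owner, int nslots)
{
    struct mgr *m = calloc(1, sizeof(*m));

    if (!m)
        return NULL;
    m->owner = owner;   /* the unwind path below relies on this */
    m->slots = calloc(nslots, sizeof(*m->slots)); /* zeroed, like kzalloc */
    if (!m->slots)
        goto err;
    m->nslots = nslots;
    return m;
err:
    free(m->slots);     /* NULL or fully zeroed: safe to walk or free */
    free(m);
    return NULL;
}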
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index be83590ed955..ff943f477d6f 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ fc_frame_payload_op(fp) != ELS_LS_ACC) {
+ FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
+ fc_lport_error(lport, fp);
+- goto err;
++ goto out;
+ }
+
+ flp = fc_frame_payload_get(fp, sizeof(*flp));
+ if (!flp) {
+ FC_LPORT_DBG(lport, "FLOGI bad response\n");
+ fc_lport_error(lport, fp);
+- goto err;
++ goto out;
+ }
+
+ mfs = ntohs(flp->fl_csp.sp_bb_data) &
+@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
+ "lport->mfs:%hu\n", mfs, lport->mfs);
+ fc_lport_error(lport, fp);
+- goto err;
++ goto out;
+ }
+
+ if (mfs <= lport->mfs) {
+diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
+index 1e1c0f1b9e69..8ed2113f5a1e 100644
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref)
+ struct fc_rport_priv *rdata;
+
+ rdata = container_of(kref, struct fc_rport_priv, kref);
+- WARN_ON(!list_empty(&rdata->peers));
+ kfree_rcu(rdata, rcu);
+ }
+ EXPORT_SYMBOL(fc_rport_destroy);
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 60bcc6df97a9..65305b3848bc 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -62,7 +62,7 @@
+
+ /* make sure inq_product_rev string corresponds to this version */
+ #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
+-static const char *sdebug_version_date = "20180128";
++static const char *sdebug_version_date = "20190125";
+
+ #define MY_NAME "scsi_debug"
+
+@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
+ (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
+ }
+
+-static void *fake_store(unsigned long long lba)
++static void *lba2fake_store(unsigned long long lba)
+ {
+ lba = do_div(lba, sdebug_store_sectors);
+
+@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
+ return ret;
+ }
+
+-/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
+- * arr into fake_store(lba,num) and return true. If comparison fails then
++/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
++ * arr into lba2fake_store(lba,num) and return true. If comparison fails then
+ * return false. */
+ static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
+ {
+@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
+ if (sdt->app_tag == cpu_to_be16(0xffff))
+ continue;
+
+- ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
++ ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
+ if (ret) {
+ dif_errors++;
+ return ret;
+@@ -3261,10 +3261,12 @@ err_out:
+ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
+ u32 ei_lba, bool unmap, bool ndob)
+ {
++ int ret;
+ unsigned long iflags;
+ unsigned long long i;
+- int ret;
+- u64 lba_off;
++ u32 lb_size = sdebug_sector_size;
++ u64 block, lbaa;
++ u8 *fs1p;
+
+ ret = check_device_access_params(scp, lba, num);
+ if (ret)
+@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
+ unmap_region(lba, num);
+ goto out;
+ }
+-
+- lba_off = lba * sdebug_sector_size;
++ lbaa = lba;
++ block = do_div(lbaa, sdebug_store_sectors);
+ /* if ndob then zero 1 logical block, else fetch 1 logical block */
++ fs1p = fake_storep + (block * lb_size);
+ if (ndob) {
+- memset(fake_storep + lba_off, 0, sdebug_sector_size);
++ memset(fs1p, 0, lb_size);
+ ret = 0;
+ } else
+- ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
+- sdebug_sector_size);
++ ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
+
+ if (-1 == ret) {
+ write_unlock_irqrestore(&atomic_rw, iflags);
+ return DID_ERROR << 16;
+- } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
++ } else if (sdebug_verbose && !ndob && (ret < lb_size))
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: lb size=%u, IO sent=%d bytes\n",
+- my_name, "write same",
+- sdebug_sector_size, ret);
++ my_name, "write same", lb_size, ret);
+
+ /* Copy first sector to remaining blocks */
+- for (i = 1 ; i < num ; i++)
+- memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
+- fake_storep + lba_off,
+- sdebug_sector_size);
+-
++ for (i = 1 ; i < num ; i++) {
++ lbaa = lba + i;
++ block = do_div(lbaa, sdebug_store_sectors);
++ memmove(fake_storep + (block * lb_size), fs1p, lb_size);
++ }
+ if (scsi_debug_lbp())
+ map_region(lba, num);
+ out:
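The scsi_debug rework replaces the flat `lba * sector_size` offset, which ran past the fake store for LBAs beyond sdebug_store_sectors, with a per-block modulo mapping, and switches to memmove() because wrapped destination blocks can overlap the source. A small userspace model of that mapping (all names are local stand-ins for the sdebug_* globals):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *store;            /* backing buffer, store_sectors * lb_size bytes */
static uint64_t store_sectors = 128;    /* stand-in for sdebug_store_sectors */
static uint32_t lb_size = 512;          /* stand-in for sdebug_sector_size */

static unsigned char *lba2store(uint64_t lba)
{
    return store + (lba % store_sectors) * lb_size; /* wrap, like do_div() */
}

/* WRITE SAME: replicate the already-filled first block across num blocks */
static void write_same(uint64_t lba, uint32_t num)
{
    unsigned char *first = lba2store(lba);

    for (uint32_t i = 1; i < num; i++)
        /* memmove: source and destination can overlap once lba+i wraps */
        memmove(lba2store(lba + i), first, lb_size);
}

int main(void)
{
    store = calloc(store_sectors, lb_size);
    if (!store)
        return 1;
    memset(lba2store(1000), 0xab, lb_size); /* an LBA beyond the store is fine */
    write_same(1000, 16);
    free(store);
    return 0;
}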
+diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
+index 5ce24718c2fd..d8b3ba047c28 100644
+--- a/drivers/soc/fsl/qbman/qman.c
++++ b/drivers/soc/fsl/qbman/qman.c
+@@ -1124,18 +1124,19 @@ static void qm_mr_process_task(struct work_struct *work);
+ static irqreturn_t portal_isr(int irq, void *ptr)
+ {
+ struct qman_portal *p = ptr;
+-
+- u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
+ u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
++ u32 clear = 0;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ /* DQRR-handling if it's interrupt-driven */
+- if (is & QM_PIRQ_DQRI)
++ if (is & QM_PIRQ_DQRI) {
+ __poll_portal_fast(p, QMAN_POLL_LIMIT);
++ clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
++ }
+ /* Handling of anything else that's interrupt-driven */
+- clear |= __poll_portal_slow(p, is);
++ clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
+ qm_out(&p->p, QM_REG_ISR, clear);
+ return IRQ_HANDLED;
+ }
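The qman fix narrows the ISR's acknowledgement mask: DQRR bits are cleared only when the fast path actually ran, and the slow-path result is masked to its own sources, so pending-but-unhandled events are no longer silently acked. A toy model of that discipline (the register and source names are invented):

#include <stdint.h>

#define IRQ_RX   0x1u   /* stands in for QM_PIRQ_DQRI */
#define IRQ_SLOW 0x2u   /* stands in for the slow-path source group */

static uint32_t status_reg;             /* pretend device status register */

static uint32_t read_status(void) { return status_reg; }
static void write_ack(uint32_t mask) { status_reg &= ~mask; }
static void handle_rx(void) { /* drain the receive ring */ }
static uint32_t handle_slow(uint32_t st) { return st & IRQ_SLOW; }

static int isr(uint32_t enabled)
{
    uint32_t status = read_status() & enabled;
    uint32_t clear = 0;

    if (!status)
        return 0;               /* IRQ_NONE */
    if (status & IRQ_RX) {
        handle_rx();
        clear |= IRQ_RX;        /* ack only what was serviced */
    }
    clear |= handle_slow(status);
    write_ack(clear);
    return 1;                   /* IRQ_HANDLED */
}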
+diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
+index 5596c52e246d..ecc51ef0753f 100644
+--- a/drivers/staging/erofs/namei.c
++++ b/drivers/staging/erofs/namei.c
+@@ -15,74 +15,77 @@
+
+ #include <trace/events/erofs.h>
+
+-/* based on the value of qn->len is accurate */
+-static inline int dirnamecmp(struct qstr *qn,
+- struct qstr *qd, unsigned int *matched)
++struct erofs_qstr {
++ const unsigned char *name;
++ const unsigned char *end;
++};
++
++/* based on the end of qn is accurate and it must have the trailing '\0' */
++static inline int dirnamecmp(const struct erofs_qstr *qn,
++ const struct erofs_qstr *qd,
++ unsigned int *matched)
+ {
+- unsigned int i = *matched, len = min(qn->len, qd->len);
+-loop:
+- if (unlikely(i >= len)) {
+- *matched = i;
+- if (qn->len < qd->len) {
+- /*
+- * actually (qn->len == qd->len)
+- * when qd->name[i] == '\0'
+- */
+- return qd->name[i] == '\0' ? 0 : -1;
++ unsigned int i = *matched;
++
++ /*
++ * On-disk error: only BUG_ON in debugging mode.
++ * Otherwise, return 1 to just skip the invalid name
++ * and go on (for the sake of lookup performance).
++ */
++ DBG_BUGON(qd->name > qd->end);
++
++ /* qd may not have a trailing '\0' */
++ /* however, any access below qd->end is safe */
++ while (qd->name + i < qd->end && qd->name[i] != '\0') {
++ if (qn->name[i] != qd->name[i]) {
++ *matched = i;
++ return qn->name[i] > qd->name[i] ? 1 : -1;
+ }
+- return (qn->len > qd->len);
++ ++i;
+ }
+-
+- if (qn->name[i] != qd->name[i]) {
+- *matched = i;
+- return qn->name[i] > qd->name[i] ? 1 : -1;
+- }
+-
+- ++i;
+- goto loop;
++ *matched = i;
++ /* See comments in __d_alloc on the terminating NUL character */
++ return qn->name[i] == '\0' ? 0 : 1;
+ }
+
+-static struct erofs_dirent *find_target_dirent(
+- struct qstr *name,
+- u8 *data, int maxsize)
++#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
++
++static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
++ u8 *data,
++ unsigned int dirblksize,
++ const int ndirents)
+ {
+- unsigned int ndirents, head, back;
++ int head, back;
+ unsigned int startprfx, endprfx;
+ struct erofs_dirent *const de = (struct erofs_dirent *)data;
+
+- /* make sure that maxsize is valid */
+- BUG_ON(maxsize < sizeof(struct erofs_dirent));
+-
+- ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
+-
+- /* corrupted dir (may be unnecessary...) */
+- BUG_ON(!ndirents);
+-
+- head = 0;
++ /* since the 1st dirent has been evaluated previously */
++ head = 1;
+ back = ndirents - 1;
+ startprfx = endprfx = 0;
+
+ while (head <= back) {
+- unsigned int mid = head + (back - head) / 2;
+- unsigned int nameoff = le16_to_cpu(de[mid].nameoff);
++ const int mid = head + (back - head) / 2;
++ const int nameoff = nameoff_from_disk(de[mid].nameoff,
++ dirblksize);
+ unsigned int matched = min(startprfx, endprfx);
+-
+- struct qstr dname = QSTR_INIT(data + nameoff,
+- unlikely(mid >= ndirents - 1) ?
+- maxsize - nameoff :
+- le16_to_cpu(de[mid + 1].nameoff) - nameoff);
++ struct erofs_qstr dname = {
++ .name = data + nameoff,
++ .end = unlikely(mid >= ndirents - 1) ?
++ data + dirblksize :
++ data + nameoff_from_disk(de[mid + 1].nameoff,
++ dirblksize)
++ };
+
+ /* string comparison without already matched prefix */
+ int ret = dirnamecmp(name, &dname, &matched);
+
+- if (unlikely(!ret))
++ if (unlikely(!ret)) {
+ return de + mid;
+- else if (ret > 0) {
++ } else if (ret > 0) {
+ head = mid + 1;
+ startprfx = matched;
+- } else if (unlikely(mid < 1)) /* fix "mid" overflow */
+- break;
+- else {
++ } else {
+ back = mid - 1;
+ endprfx = matched;
+ }
+@@ -91,12 +94,12 @@ static struct erofs_dirent *find_target_dirent(
+ return ERR_PTR(-ENOENT);
+ }
+
+-static struct page *find_target_block_classic(
+- struct inode *dir,
+- struct qstr *name, int *_diff)
++static struct page *find_target_block_classic(struct inode *dir,
++ struct erofs_qstr *name,
++ int *_ndirents)
+ {
+ unsigned int startprfx, endprfx;
+- unsigned int head, back;
++ int head, back;
+ struct address_space *const mapping = dir->i_mapping;
+ struct page *candidate = ERR_PTR(-ENOENT);
+
+@@ -105,41 +108,43 @@ static struct page *find_target_block_classic(
+ back = inode_datablocks(dir) - 1;
+
+ while (head <= back) {
+- unsigned int mid = head + (back - head) / 2;
++ const int mid = head + (back - head) / 2;
+ struct page *page = read_mapping_page(mapping, mid, NULL);
+
+- if (IS_ERR(page)) {
+-exact_out:
+- if (!IS_ERR(candidate)) /* valid candidate */
+- put_page(candidate);
+- return page;
+- } else {
+- int diff;
+- unsigned int ndirents, matched;
+- struct qstr dname;
++ if (!IS_ERR(page)) {
+ struct erofs_dirent *de = kmap_atomic(page);
+- unsigned int nameoff = le16_to_cpu(de->nameoff);
+-
+- ndirents = nameoff / sizeof(*de);
++ const int nameoff = nameoff_from_disk(de->nameoff,
++ EROFS_BLKSIZ);
++ const int ndirents = nameoff / sizeof(*de);
++ int diff;
++ unsigned int matched;
++ struct erofs_qstr dname;
+
+- /* corrupted dir (should have one entry at least) */
+- BUG_ON(!ndirents || nameoff > PAGE_SIZE);
++ if (unlikely(!ndirents)) {
++ DBG_BUGON(1);
++ kunmap_atomic(de);
++ put_page(page);
++ page = ERR_PTR(-EIO);
++ goto out;
++ }
+
+ matched = min(startprfx, endprfx);
+
+ dname.name = (u8 *)de + nameoff;
+- dname.len = ndirents == 1 ?
+- /* since the rest of the last page is 0 */
+- EROFS_BLKSIZ - nameoff
+- : le16_to_cpu(de[1].nameoff) - nameoff;
++ if (ndirents == 1)
++ dname.end = (u8 *)de + EROFS_BLKSIZ;
++ else
++ dname.end = (u8 *)de +
++ nameoff_from_disk(de[1].nameoff,
++ EROFS_BLKSIZ);
+
+ /* string comparison without already matched prefix */
+ diff = dirnamecmp(name, &dname, &matched);
+ kunmap_atomic(de);
+
+ if (unlikely(!diff)) {
+- *_diff = 0;
+- goto exact_out;
++ *_ndirents = 0;
++ goto out;
+ } else if (diff > 0) {
+ head = mid + 1;
+ startprfx = matched;
+@@ -147,45 +152,51 @@ exact_out:
+ if (likely(!IS_ERR(candidate)))
+ put_page(candidate);
+ candidate = page;
++ *_ndirents = ndirents;
+ } else {
+ put_page(page);
+
+- if (unlikely(mid < 1)) /* fix "mid" overflow */
+- break;
+-
+ back = mid - 1;
+ endprfx = matched;
+ }
++ continue;
+ }
++out: /* free if the candidate is valid */
++ if (!IS_ERR(candidate))
++ put_page(candidate);
++ return page;
+ }
+- *_diff = 1;
+ return candidate;
+ }
+
+ int erofs_namei(struct inode *dir,
+- struct qstr *name,
+- erofs_nid_t *nid, unsigned int *d_type)
++ struct qstr *name,
++ erofs_nid_t *nid, unsigned int *d_type)
+ {
+- int diff;
++ int ndirents;
+ struct page *page;
+- u8 *data;
++ void *data;
+ struct erofs_dirent *de;
++ struct erofs_qstr qn;
+
+ if (unlikely(!dir->i_size))
+ return -ENOENT;
+
+- diff = 1;
+- page = find_target_block_classic(dir, name, &diff);
++ qn.name = name->name;
++ qn.end = name->name + name->len;
++
++ ndirents = 0;
++ page = find_target_block_classic(dir, &qn, &ndirents);
+
+ if (unlikely(IS_ERR(page)))
+ return PTR_ERR(page);
+
+ data = kmap_atomic(page);
+ /* the target page has been mapped */
+- de = likely(diff) ?
+- /* since the rest of the last page is 0 */
+- find_target_dirent(name, data, EROFS_BLKSIZ) :
+- (struct erofs_dirent *)data;
++ if (ndirents)
++ de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
++ else
++ de = (struct erofs_dirent *)data;
+
+ if (likely(!IS_ERR(de))) {
+ *nid = le64_to_cpu(de->nid);
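The erofs rewrite above hinges on comparing a NUL-terminated needle against an on-disk name that is only bounded by an end pointer and may lack a terminator; the `matched` counter carries the already-verified common prefix between binary-search probes. A userspace sketch of that comparison, with `struct span` loosely mirroring erofs_qstr:

#include <stddef.h>

struct span { const unsigned char *name, *end; };

/* qn is NUL-terminated; qd is bounded by qd->end and may not be */
static int namecmp(const struct span *qn, const struct span *qd,
                   unsigned int *matched)
{
    unsigned int i = *matched;

    while (qd->name + i < qd->end && qd->name[i] != '\0') {
        if (qn->name[i] != qd->name[i]) {
            *matched = i;
            return qn->name[i] > qd->name[i] ? 1 : -1;
        }
        ++i;
    }
    *matched = i;
    /* candidate exhausted: equal only if the needle also ends here */
    return qn->name[i] == '\0' ? 0 : 1;
}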
+diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
+index cb7fcd7c0ad8..c1e9ea621f41 100644
+--- a/drivers/usb/dwc3/dwc3-exynos.c
++++ b/drivers/usb/dwc3/dwc3-exynos.c
+@@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
+ for (i = 0; i < exynos->num_clks; i++) {
+ ret = clk_prepare_enable(exynos->clks[i]);
+ if (ret) {
+- while (--i > 0)
++ while (i-- > 0)
+ clk_disable_unprepare(exynos->clks[i]);
+ return ret;
+ }
+@@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev)
+ for (i = 0; i < exynos->num_clks; i++) {
+ ret = clk_prepare_enable(exynos->clks[i]);
+ if (ret) {
+- while (--i > 0)
++ while (i-- > 0)
+ clk_disable_unprepare(exynos->clks[i]);
+ return ret;
+ }
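Both dwc3-exynos hunks repair the same rollback idiom: after clk_prepare_enable() fails at index i, clocks 0..i-1 must be unwound, but `while (--i > 0)` stops before it ever reaches index 0. A compilable demonstration of the corrected loop, with toy enable/disable stand-ins:

#include <stdio.h>

#define N 4

static int enable(int i) { return i == 2 ? -1 : 0; }    /* fail at index 2 */
static void disable(int i) { printf("disable %d\n", i); }

int main(void)
{
    int i;

    for (i = 0; i < N; i++) {
        if (enable(i)) {
            /* i-- tests first, then decrements: rolls back i-1..0.
             * The old "--i > 0" form never reached index 0. */
            while (i-- > 0)
                disable(i);
            return 1;
        }
    }
    return 0;
}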
+diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
+index d7312eed6088..91ea3083e7ad 100644
+--- a/drivers/usb/phy/Kconfig
++++ b/drivers/usb/phy/Kconfig
+@@ -21,7 +21,7 @@ config AB8500_USB
+
+ config FSL_USB2_OTG
+ bool "Freescale USB OTG Transceiver Driver"
+- depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
++ depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
+ select USB_PHY
+ help
+diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
+index d441244b79df..28d9c2b1b3bb 100644
+--- a/fs/autofs/expire.c
++++ b/fs/autofs/expire.c
+@@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb,
+ pkt.len = dentry->d_name.len;
+ memcpy(pkt.name, dentry->d_name.name, pkt.len);
+ pkt.name[pkt.len] = '\0';
+- dput(dentry);
+
+ if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
+ ret = -EFAULT;
+@@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb,
+ complete_all(&ino->expire_complete);
+ spin_unlock(&sbi->fs_lock);
+
++ dput(dentry);
++
+ return ret;
+ }
+
+diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
+index 846c052569dd..3c14a8e45ffb 100644
+--- a/fs/autofs/inode.c
++++ b/fs/autofs/inode.c
+@@ -255,8 +255,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
+ }
+ root_inode = autofs_get_inode(s, S_IFDIR | 0755);
+ root = d_make_root(root_inode);
+- if (!root)
++ if (!root) {
++ ret = -ENOMEM;
+ goto fail_ino;
++ }
+ pipe = NULL;
+
+ root->d_fsdata = ino;
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 1286c2b95498..72e33ffa00ff 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -200,6 +200,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
+ struct buffer_head *head;
+ struct page *page;
+ int all_mapped = 1;
++ static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
+
+ index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
+ page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
+@@ -227,15 +228,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
+ * file io on the block device and getblk. It gets dealt with
+ * elsewhere, don't buffer_error if we had some unmapped buffers
+ */
+- if (all_mapped) {
+- printk("__find_get_block_slow() failed. "
+- "block=%llu, b_blocknr=%llu\n",
+- (unsigned long long)block,
+- (unsigned long long)bh->b_blocknr);
+- printk("b_state=0x%08lx, b_size=%zu\n",
+- bh->b_state, bh->b_size);
+- printk("device %pg blocksize: %d\n", bdev,
+- 1 << bd_inode->i_blkbits);
++ ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
++ if (all_mapped && __ratelimit(&last_warned)) {
++ printk("__find_get_block_slow() failed. block=%llu, "
++ "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
++ "device %pg blocksize: %d\n",
++ (unsigned long long)block,
++ (unsigned long long)bh->b_blocknr,
++ bh->b_state, bh->b_size, bdev,
++ 1 << bd_inode->i_blkbits);
+ }
+ out_unlock:
+ spin_unlock(&bd_mapping->private_lock);
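The buffer.c change folds the three printk()s into one and, more importantly, puts it behind a once-per-second ratelimit so a misbehaving filesystem cannot flood the log. A hedged userspace model of that guard (a simple time-window check, not the kernel's ___ratelimit(), which also counts suppressed callbacks):

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

struct ratelimit { time_t last; int interval_s; };

static bool ratelimit_ok(struct ratelimit *rl)
{
    time_t now = time(NULL);

    if (now - rl->last < rl->interval_s)
        return false;   /* suppress within the window */
    rl->last = now;
    return true;
}

static void warn_failed_lookup(unsigned long long block)
{
    static struct ratelimit rl = { 0, 1 }; /* one message per second */

    if (ratelimit_ok(&rl))
        fprintf(stderr, "lookup failed, block=%llu\n", block);
}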
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 5671d5ee7f58..eaf39fb71f8b 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -84,8 +84,8 @@
+
+ #define NUMBER_OF_SMB2_COMMANDS 0x0013
+
+-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
+-#define MAX_SMB2_HDR_SIZE 0x00b0
++/* 52 transform hdr + 64 hdr + 88 create rsp */
++#define MAX_SMB2_HDR_SIZE 204
+
+ #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
+ #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
+diff --git a/fs/drop_caches.c b/fs/drop_caches.c
+index 82377017130f..d31b6c72b476 100644
+--- a/fs/drop_caches.c
++++ b/fs/drop_caches.c
+@@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ spin_lock(&inode->i_lock);
++ /*
++ * We must skip inodes in unusual state. We may also skip
++ * inodes without pages but we deliberately won't in case
++ * we need to reschedule to avoid softlockups.
++ */
+ if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+- (inode->i_mapping->nrpages == 0)) {
++ (inode->i_mapping->nrpages == 0 && !need_resched())) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+@@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&sb->s_inode_list_lock);
+
++ cond_resched();
+ invalidate_mapping_pages(inode->i_mapping, 0, -1);
+ iput(toput_inode);
+ toput_inode = inode;
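The drop_caches fix addresses a softlockup: a superblock with a huge number of page-less inodes offered no reschedule point, because those inodes were skipped before the loop ever slept. The reworked condition keeps skipping them except when a reschedule is pending. A loose userspace sketch of that shape, where need_yield() stands in for need_resched() and sched_yield() for cond_resched():

#include <sched.h>
#include <stdbool.h>

struct item { struct item *next; int npages; };

static int work_done;

static bool need_yield(void)            /* stand-in yield policy */
{
    return (++work_done % 64) == 0;
}

static void walk(struct item *head)
{
    for (struct item *it = head; it; it = it->next) {
        /* skip no-op items, but never skip past a due reschedule */
        if (it->npages == 0 && !need_yield())
            continue;
        sched_yield();
        /* ... expensive per-item work here ... */
    }
}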
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 05431324b262..57cdce53b64b 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -107,7 +107,7 @@ static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
+
+ static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
+ {
+- u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
++ u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
+
+ return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
+ }
+diff --git a/fs/iomap.c b/fs/iomap.c
+index ce837d962d47..83d9a196fe3e 100644
+--- a/fs/iomap.c
++++ b/fs/iomap.c
+@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page)
+ atomic_set(&iop->read_count, 0);
+ atomic_set(&iop->write_count, 0);
+ bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
++
++ /*
++ * migrate_page_move_mapping() assumes that pages with private data have
++ * their count elevated by 1.
++ */
++ get_page(page);
+ set_page_private(page, (unsigned long)iop);
+ SetPagePrivate(page);
+ return iop;
+@@ -132,6 +138,7 @@ iomap_page_release(struct page *page)
+ WARN_ON_ONCE(atomic_read(&iop->write_count));
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
++ put_page(page);
+ kfree(iop);
+ }
+
+@@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
+
+ if (page_has_private(page)) {
+ ClearPagePrivate(page);
++ get_page(newpage);
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
++ put_page(page);
+ SetPagePrivate(newpage);
+ }
+
+@@ -1797,6 +1806,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t pos = iocb->ki_pos, start = pos;
+ loff_t end = iocb->ki_pos + count - 1, ret = 0;
+ unsigned int flags = IOMAP_DIRECT;
++ bool wait_for_completion = is_sync_kiocb(iocb);
+ struct blk_plug plug;
+ struct iomap_dio *dio;
+
+@@ -1816,7 +1826,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ dio->end_io = end_io;
+ dio->error = 0;
+ dio->flags = 0;
+- dio->wait_for_completion = is_sync_kiocb(iocb);
+
+ dio->submit.iter = iter;
+ dio->submit.waiter = current;
+@@ -1871,7 +1880,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ dio_warn_stale_pagecache(iocb->ki_filp);
+ ret = 0;
+
+- if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
++ if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
+ !inode->i_sb->s_dio_done_wq) {
+ ret = sb_init_dio_done_wq(inode->i_sb);
+ if (ret < 0)
+@@ -1887,7 +1896,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ if (ret <= 0) {
+ /* magic error code to fall back to buffered I/O */
+ if (ret == -ENOTBLK) {
+- dio->wait_for_completion = true;
++ wait_for_completion = true;
+ ret = 0;
+ }
+ break;
+@@ -1909,8 +1918,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ if (dio->flags & IOMAP_DIO_WRITE_FUA)
+ dio->flags &= ~IOMAP_DIO_NEED_SYNC;
+
++ /*
++ * We are about to drop our additional submission reference, which
++ * might be the last reference to the dio. There are three
++ * different ways we can progress here:
++ *
++ * (a) If this is the last reference we will always complete and free
++ * the dio ourselves.
++ * (b) If this is not the last reference, and we serve an asynchronous
++ * iocb, we must never touch the dio after the decrement, the
++ * I/O completion handler will complete and free it.
++ * (c) If this is not the last reference, but we serve a synchronous
++ * iocb, the I/O completion handler will wake us up on the drop
++ * of the final reference, and we will complete and free it here
++ * after we got woken by the I/O completion handler.
++ */
++ dio->wait_for_completion = wait_for_completion;
+ if (!atomic_dec_and_test(&dio->ref)) {
+- if (!dio->wait_for_completion)
++ if (!wait_for_completion)
+ return -EIOCBQUEUED;
+
+ for (;;) {
+@@ -1927,9 +1952,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ __set_current_state(TASK_RUNNING);
+ }
+
+- ret = iomap_dio_complete(dio);
+-
+- return ret;
++ return iomap_dio_complete(dio);
+
+ out_free_dio:
+ kfree(dio);
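The iomap change keeps wait_for_completion in a local and only publishes it into the dio right before the final reference drop; writing it after the drop raced with the completion path freeing the structure. A condensed sketch of that protocol using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

struct dio {
    atomic_int ref;
    bool wait_for_completion;   /* read by the I/O completion side */
};

/* returns true if the caller must complete/free the dio itself */
static bool submit_drop_ref(struct dio *dio, bool wait_for_completion)
{
    dio->wait_for_completion = wait_for_completion; /* publish before drop */
    if (atomic_fetch_sub(&dio->ref, 1) == 1)
        return true;    /* last reference: we complete and free it */
    if (!wait_for_completion)
        return false;   /* async: completion side owns the dio now */
    /* synchronous: the completion side wakes us; we then free it here */
    return true;
}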
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 5ef2c71348bd..6b666d187907 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1906,6 +1906,11 @@ static int nfs_parse_devname(const char *dev_name,
+ size_t len;
+ char *end;
+
++ if (unlikely(!dev_name || !*dev_name)) {
++ dfprintk(MOUNT, "NFS: device name not specified\n");
++ return -EINVAL;
++ }
++
+ /* Is the host name protected with square brakcets? */
+ if (*dev_name == '[') {
+ end = strchr(++dev_name, ']');
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index 8ae109429a88..e39bac94dead 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
+ inode = proc_get_inode(dir->i_sb, de);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+- d_set_d_op(dentry, &proc_misc_dentry_ops);
++ d_set_d_op(dentry, de->proc_dops);
+ return d_splice_alias(inode, dentry);
+ }
+ read_unlock(&proc_subdir_lock);
+@@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
+ INIT_LIST_HEAD(&ent->pde_openers);
+ proc_set_user(ent, (*parent)->uid, (*parent)->gid);
+
++ ent->proc_dops = &proc_misc_dentry_ops;
++
+ out:
+ return ent;
+ }
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index 5185d7f6a51e..95b14196f284 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -44,6 +44,7 @@ struct proc_dir_entry {
+ struct completion *pde_unload_completion;
+ const struct inode_operations *proc_iops;
+ const struct file_operations *proc_fops;
++ const struct dentry_operations *proc_dops;
+ union {
+ const struct seq_operations *seq_ops;
+ int (*single_show)(struct seq_file *, void *);
+diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
+index d5e0fcb3439e..a7b12435519e 100644
+--- a/fs/proc/proc_net.c
++++ b/fs/proc/proc_net.c
+@@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode)
+ return maybe_get_net(PDE_NET(PDE(inode)));
+ }
+
++static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
++{
++ return 0;
++}
++
++static const struct dentry_operations proc_net_dentry_ops = {
++ .d_revalidate = proc_net_d_revalidate,
++ .d_delete = always_delete_dentry,
++};
++
++static void pde_force_lookup(struct proc_dir_entry *pde)
++{
++ /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
++ pde->proc_dops = &proc_net_dentry_ops;
++}
++
+ static int seq_open_net(struct inode *inode, struct file *file)
+ {
+ unsigned int state_size = PDE(inode)->state_size;
+@@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
+ p = proc_create_reg(name, mode, &parent, data);
+ if (!p)
+ return NULL;
++ pde_force_lookup(p);
+ p->proc_fops = &proc_net_seq_fops;
+ p->seq_ops = ops;
+ p->state_size = state_size;
+@@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
+ p = proc_create_reg(name, mode, &parent, data);
+ if (!p)
+ return NULL;
++ pde_force_lookup(p);
+ p->proc_fops = &proc_net_seq_fops;
+ p->seq_ops = ops;
+ p->state_size = state_size;
+@@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
+ p = proc_create_reg(name, mode, &parent, data);
+ if (!p)
+ return NULL;
++ pde_force_lookup(p);
+ p->proc_fops = &proc_net_single_fops;
+ p->single_show = show;
+ return proc_register(parent, p);
+@@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
+ p = proc_create_reg(name, mode, &parent, data);
+ if (!p)
+ return NULL;
++ pde_force_lookup(p);
+ p->proc_fops = &proc_net_single_fops;
+ p->single_show = show;
+ p->write = write;
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index bfe1639df02d..97fc498dc767 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -47,6 +47,24 @@ static inline bool drm_arch_can_wc_memory(void)
+ return false;
+ #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+ return false;
++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ /*
++ * The DRM driver stack is designed to work with cache coherent devices
++ * only, but permits an optimization to be enabled in some cases, where
++ * for some buffers, both the CPU and the GPU use uncached mappings,
++ * removing the need for DMA snooping and allocation in the CPU caches.
++ *
++ * The use of uncached GPU mappings relies on the correct implementation
++ * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
++ * will use cached mappings nonetheless. On x86 platforms, this does not
++ * seem to matter, as uncached CPU mappings will snoop the caches in any
++ * case. However, on ARM and arm64, enabling this optimization on a
++ * platform where NoSnoop is ignored results in loss of coherency, which
++ * breaks correct operation of the device. Since we have no way of
++ * detecting whether NoSnoop works or not, just disable this
++ * optimization entirely for ARM and arm64.
++ */
++ return false;
+ #else
+ return true;
+ #endif
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index b776626aeb84..958eddbc44d3 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
+ return qdisc_skb_cb(skb)->data;
+ }
+
+-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+- struct sk_buff *skb)
++static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
++ struct sk_buff *skb)
+ {
+ u8 *cb_data = bpf_skb_cb(skb);
+ u8 cb_saved[BPF_SKB_CB_LEN];
+@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+ return res;
+ }
+
++static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
++ struct sk_buff *skb)
++{
++ u32 res;
++
++ preempt_disable();
++ res = __bpf_prog_run_save_cb(prog, skb);
++ preempt_enable();
++ return res;
++}
++
+ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
+ struct sk_buff *skb)
+ {
+ u8 *cb_data = bpf_skb_cb(skb);
++ u32 res;
+
+ if (unlikely(prog->cb_access))
+ memset(cb_data, 0, BPF_SKB_CB_LEN);
+
+- return BPF_PROG_RUN(prog, skb);
++ preempt_disable();
++ res = BPF_PROG_RUN(prog, skb);
++ preempt_enable();
++ return res;
+ }
+
+ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
+diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
+index 071b4cbdf010..c848a7cc502e 100644
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -319,7 +319,7 @@
+ #define GITS_TYPER_PLPIS (1UL << 0)
+ #define GITS_TYPER_VLPIS (1UL << 1)
+ #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
+-#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
++#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
+ #define GITS_TYPER_IDBITS_SHIFT 8
+ #define GITS_TYPER_DEVBITS_SHIFT 13
+ #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index f428e86f4800..b5d99482d3fe 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -388,7 +388,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig);
+ #endif
+
+ #define siginmask(sig, mask) \
+- ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
++ ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
+
+ #define SIG_KERNEL_ONLY_MASK (\
+ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
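The signal.h one-liner matters because rt_sigmask(sig) expands to a shift by sig - 1: a zero or negative signal number from a malformed caller made that a shift by a negative count, which is undefined behaviour. A small standalone illustration of the guarded form:

#include <stdio.h>

#define NSIG_DEMO 32

static unsigned long sigmask_of(int sig)
{
    if (sig <= 0 || sig >= NSIG_DEMO)   /* the added "(sig) > 0" check */
        return 0;
    return 1UL << (sig - 1);
}

int main(void)
{
    printf("%#lx %#lx\n", sigmask_of(9), sigmask_of(0));
    return 0;
}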
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index 7ddfc65586b0..4335bd771ce5 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
+ struct clk *pclk;
+ struct clk *clk_ptp_ref;
+ unsigned int clk_ptp_rate;
++ unsigned int clk_ref_rate;
+ struct reset_control *stmmac_rst;
+ struct stmmac_axi *axi;
+ int has_gmac4;
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 841835a387e1..b4984bbbe157 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -469,9 +469,7 @@ struct nft_set_binding {
+ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding);
+ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+- struct nft_set_binding *binding);
+-void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
+- struct nft_set_binding *binding);
++ struct nft_set_binding *binding, bool commit);
+ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
+
+ /**
+@@ -721,6 +719,13 @@ struct nft_expr_type {
+ #define NFT_EXPR_STATEFUL 0x1
+ #define NFT_EXPR_GC 0x2
+
++enum nft_trans_phase {
++ NFT_TRANS_PREPARE,
++ NFT_TRANS_ABORT,
++ NFT_TRANS_COMMIT,
++ NFT_TRANS_RELEASE
++};
++
+ /**
+ * struct nft_expr_ops - nf_tables expression operations
+ *
+@@ -750,7 +755,8 @@ struct nft_expr_ops {
+ void (*activate)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr);
+ void (*deactivate)(const struct nft_ctx *ctx,
+- const struct nft_expr *expr);
++ const struct nft_expr *expr,
++ enum nft_trans_phase phase);
+ void (*destroy)(const struct nft_ctx *ctx,
+ const struct nft_expr *expr);
+ void (*destroy_clone)(const struct nft_ctx *ctx,
+@@ -1323,12 +1329,15 @@ struct nft_trans_rule {
+ struct nft_trans_set {
+ struct nft_set *set;
+ u32 set_id;
++ bool bound;
+ };
+
+ #define nft_trans_set(trans) \
+ (((struct nft_trans_set *)trans->data)->set)
+ #define nft_trans_set_id(trans) \
+ (((struct nft_trans_set *)trans->data)->set_id)
++#define nft_trans_set_bound(trans) \
++ (((struct nft_trans_set *)trans->data)->bound)
+
+ struct nft_trans_chain {
+ bool update;
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index 9425c2fb872f..6c24b1fb2db8 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
+ bpf_compute_and_save_data_end(skb, &saved_data_end);
+
+ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
+- bpf_prog_run_save_cb);
++ __bpf_prog_run_save_cb);
+ bpf_restore_data_end(skb, saved_data_end);
+ __skb_pull(skb, offset);
+ skb->sk = save_sk;
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 2c1790288138..3388c0b85a57 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -677,7 +677,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+ }
+
+ if (htab_is_prealloc(htab)) {
+- pcpu_freelist_push(&htab->freelist, &l->fnode);
++ __pcpu_freelist_push(&htab->freelist, &l->fnode);
+ } else {
+ atomic_dec(&htab->count);
+ l->htab = htab;
+@@ -739,7 +739,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+ } else {
+ struct pcpu_freelist_node *l;
+
+- l = pcpu_freelist_pop(&htab->freelist);
++ l = __pcpu_freelist_pop(&htab->freelist);
+ if (!l)
+ return ERR_PTR(-E2BIG);
+ l_new = container_of(l, struct htab_elem, fnode);
+diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
+index 673fa6fe2d73..0c1b4ba9e90e 100644
+--- a/kernel/bpf/percpu_freelist.c
++++ b/kernel/bpf/percpu_freelist.c
+@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
+ free_percpu(s->freelist);
+ }
+
+-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
+- struct pcpu_freelist_node *node)
++static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
++ struct pcpu_freelist_node *node)
+ {
+ raw_spin_lock(&head->lock);
+ node->next = head->first;
+@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
+ raw_spin_unlock(&head->lock);
+ }
+
+-void pcpu_freelist_push(struct pcpu_freelist *s,
++void __pcpu_freelist_push(struct pcpu_freelist *s,
+ struct pcpu_freelist_node *node)
+ {
+ struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
+
+- __pcpu_freelist_push(head, node);
++ ___pcpu_freelist_push(head, node);
++}
++
++void pcpu_freelist_push(struct pcpu_freelist *s,
++ struct pcpu_freelist_node *node)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ __pcpu_freelist_push(s, node);
++ local_irq_restore(flags);
+ }
+
+ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
+@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
+ for_each_possible_cpu(cpu) {
+ again:
+ head = per_cpu_ptr(s->freelist, cpu);
+- __pcpu_freelist_push(head, buf);
++ ___pcpu_freelist_push(head, buf);
+ i++;
+ buf += elem_size;
+ if (i == nr_elems)
+@@ -74,14 +84,12 @@ again:
+ local_irq_restore(flags);
+ }
+
+-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
+ {
+ struct pcpu_freelist_head *head;
+ struct pcpu_freelist_node *node;
+- unsigned long flags;
+ int orig_cpu, cpu;
+
+- local_irq_save(flags);
+ orig_cpu = cpu = raw_smp_processor_id();
+ while (1) {
+ head = per_cpu_ptr(s->freelist, cpu);
+@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+ node = head->first;
+ if (node) {
+ head->first = node->next;
+- raw_spin_unlock_irqrestore(&head->lock, flags);
++ raw_spin_unlock(&head->lock);
+ return node;
+ }
+ raw_spin_unlock(&head->lock);
+ cpu = cpumask_next(cpu, cpu_possible_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = 0;
+- if (cpu == orig_cpu) {
+- local_irq_restore(flags);
++ if (cpu == orig_cpu)
+ return NULL;
+- }
+ }
+ }
++
++struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
++{
++ struct pcpu_freelist_node *ret;
++ unsigned long flags;
++
++ local_irq_save(flags);
++ ret = __pcpu_freelist_pop(s);
++ local_irq_restore(flags);
++ return ret;
++}
+diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
+index 3049aae8ea1e..c3960118e617 100644
+--- a/kernel/bpf/percpu_freelist.h
++++ b/kernel/bpf/percpu_freelist.h
+@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
+ struct pcpu_freelist_node *next;
+ };
+
++/* pcpu_freelist_* do spin_lock_irqsave. */
+ void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
+ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
++/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
++void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
+ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
+ u32 nr_elems);
+ int pcpu_freelist_init(struct pcpu_freelist *);
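The freelist split gives callers that already run with interrupts disabled (the hashtab fast path) lock-only __pcpu_freelist_* helpers, while the public wrappers add the irq save/restore themselves. A loose userspace analogue of that layering, with signal blocking playing the role of irq disabling:

#include <pthread.h>
#include <signal.h>
#include <stddef.h>

struct node { struct node *next; };

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* lock-only variant: caller has already blocked "interrupts" (signals) */
static void __push(struct node *n)
{
    pthread_mutex_lock(&lock);
    n->next = head;
    head = n;
    pthread_mutex_unlock(&lock);
}

/* public variant: blocks signals around the lock, like local_irq_save() */
static void push(struct node *n)
{
    sigset_t all, old;

    sigfillset(&all);
    pthread_sigmask(SIG_BLOCK, &all, &old);
    __push(n);
    pthread_sigmask(SIG_SETMASK, &old, NULL);
}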
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index cf5040fd5434..5f03ddf7b615 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -712,8 +712,13 @@ static int map_lookup_elem(union bpf_attr *attr)
+
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_lookup_elem(map, key, value);
+- } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+- map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
++ goto done;
++ }
++
++ preempt_disable();
++ this_cpu_inc(bpf_prog_active);
++ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
++ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+ err = bpf_percpu_hash_copy(map, key, value);
+ } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
+ err = bpf_percpu_array_copy(map, key, value);
+@@ -743,7 +748,10 @@ static int map_lookup_elem(union bpf_attr *attr)
+ }
+ rcu_read_unlock();
+ }
++ this_cpu_dec(bpf_prog_active);
++ preempt_enable();
+
++done:
+ if (err)
+ goto free_value;
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 699bc25d6204..18997730b665 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+ {
+- int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+-
+- if (ret || !write)
+- return ret;
+-
++ int ret;
++ int perf_cpu = sysctl_perf_cpu_time_max_percent;
+ /*
+ * If throttling is disabled don't allow the write:
+ */
+- if (sysctl_perf_cpu_time_max_percent == 100 ||
+- sysctl_perf_cpu_time_max_percent == 0)
++ if (write && (perf_cpu == 100 || perf_cpu == 0))
+ return -EINVAL;
+
++ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
++ if (ret || !write)
++ return ret;
++
+ max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
+ perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
+ update_perf_cpu_limits();
+diff --git a/kernel/relay.c b/kernel/relay.c
+index 04f248644e06..9e0f52375487 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
+ dentry = chan->cb->create_buf_file(tmpname, chan->parent,
+ S_IRUSR, buf,
+ &chan->is_global);
++ if (IS_ERR(dentry))
++ dentry = NULL;
+
+ kfree(tmpname);
+
+@@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
+ dentry = chan->cb->create_buf_file(NULL, NULL,
+ S_IRUSR, buf,
+ &chan->is_global);
+- if (WARN_ON(dentry))
++ if (IS_ERR_OR_NULL(dentry))
+ goto free_buf;
+ }
+
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index fe24de3fbc93..c3484785b179 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -124,6 +124,7 @@
+ * sampling of the aggregate task states would be.
+ */
+
++#include "../workqueue_internal.h"
+ #include <linux/sched/loadavg.h>
+ #include <linux/seq_file.h>
+ #include <linux/proc_fs.h>
+@@ -480,9 +481,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ groupc->tasks[t]++;
+
+ write_seqcount_end(&groupc->seq);
+-
+- if (!delayed_work_pending(&group->clock_work))
+- schedule_delayed_work(&group->clock_work, PSI_FREQ);
+ }
+
+ static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
+@@ -513,6 +511,7 @@ void psi_task_change(struct task_struct *task, int clear, int set)
+ {
+ int cpu = task_cpu(task);
+ struct psi_group *group;
++ bool wake_clock = true;
+ void *iter = NULL;
+
+ if (!task->pid)
+@@ -530,8 +529,22 @@ void psi_task_change(struct task_struct *task, int clear, int set)
+ task->psi_flags &= ~clear;
+ task->psi_flags |= set;
+
+- while ((group = iterate_groups(task, &iter)))
++ /*
++ * Periodic aggregation shuts off if there is a period of no
++ * task changes, so we wake it back up if necessary. However,
++ * don't do this if the task change is the aggregation worker
++ * itself going to sleep, or we'll ping-pong forever.
++ */
++ if (unlikely((clear & TSK_RUNNING) &&
++ (task->flags & PF_WQ_WORKER) &&
++ wq_worker_last_func(task) == psi_update_work))
++ wake_clock = false;
++
++ while ((group = iterate_groups(task, &iter))) {
+ psi_group_change(group, cpu, clear, set);
++ if (wake_clock && !delayed_work_pending(&group->clock_work))
++ schedule_delayed_work(&group->clock_work, PSI_FREQ);
++ }
+ }
+
+ void psi_memstall_tick(struct task_struct *task, int cpu)
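The psi change fixes a feedback loop: re-arming the aggregation clock from every task state change meant the aggregation worker's own sleep transition kept waking the clock again. The fix checks, via wq_worker_last_func(), whether the sleeping task is the aggregator itself. A hedged thread-level sketch of that guard (all names invented):

#include <pthread.h>
#include <stdbool.h>

static pthread_t aggregator;    /* the periodic aggregation worker's thread */
static bool clock_armed;

static void arm_clock(void)     /* assumed: queues one delayed aggregation */
{
    clock_armed = true;
}

static void task_change(pthread_t who, bool going_to_sleep)
{
    bool wake_clock = true;

    /* the aggregator going idle must not re-arm its own wakeup,
     * or every sleep would schedule the work that caused it */
    if (going_to_sleep && pthread_equal(who, aggregator))
        wake_clock = false;

    /* ... record the state change for each affected group ... */

    if (wake_clock && !clock_armed)
        arm_clock();
}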
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 9864a35c8bb5..6c28d519447d 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -1158,22 +1158,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
+
+ int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
+ {
+- int err;
+-
+- mutex_lock(&bpf_event_mutex);
+- err = __bpf_probe_register(btp, prog);
+- mutex_unlock(&bpf_event_mutex);
+- return err;
++ return __bpf_probe_register(btp, prog);
+ }
+
+ int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
+ {
+- int err;
+-
+- mutex_lock(&bpf_event_mutex);
+- err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
+- mutex_unlock(&bpf_event_mutex);
+- return err;
++ return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
+ }
+
+ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 0280deac392e..288b2105bbb1 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -909,6 +909,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
+ return to_wakeup ? to_wakeup->task : NULL;
+ }
+
++/**
++ * wq_worker_last_func - retrieve worker's last work function
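++ * @task: Task to retrieve last work function of.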
++ *
++ * Determine the last function a worker executed. This is called from
++ * the scheduler to get a worker's last known identity.
++ *
++ * CONTEXT:
++ * spin_lock_irq(rq->lock)
++ *
++ * Return:
++ * The last work function %current executed as a worker, NULL if it
++ * hasn't executed any work yet.
++ */
++work_func_t wq_worker_last_func(struct task_struct *task)
++{
++ struct worker *worker = kthread_data(task);
++
++ return worker->last_func;
++}
++
+ /**
+ * worker_set_flags - set worker flags and adjust nr_running accordingly
+ * @worker: self
+@@ -2184,6 +2204,9 @@ __acquires(&pool->lock)
+ if (unlikely(cpu_intensive))
+ worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
+
++ /* tag the worker for identification in schedule() */
++ worker->last_func = worker->current_func;
++
+ /* we're done with it, release */
+ hash_del(&worker->hentry);
+ worker->current_work = NULL;
+diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
+index 66fbb5a9e633..cb68b03ca89a 100644
+--- a/kernel/workqueue_internal.h
++++ b/kernel/workqueue_internal.h
+@@ -53,6 +53,9 @@ struct worker {
+
+ /* used only by rescuers to point to the target workqueue */
+ struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
++
++ /* used by the scheduler to determine a worker's last known identity */
++ work_func_t last_func;
+ };
+
+ /**
+@@ -67,9 +70,10 @@ static inline struct worker *current_wq_worker(void)
+
+ /*
+ * Scheduler hooks for concurrency managed workqueue. Only to be used from
+- * sched/core.c and workqueue.c.
++ * sched/ and workqueue.c.
+ */
+ void wq_worker_waking_up(struct task_struct *task, int cpu);
+ struct task_struct *wq_worker_sleeping(struct task_struct *task);
++work_func_t wq_worker_last_func(struct task_struct *task);
+
+ #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
+diff --git a/lib/test_kmod.c b/lib/test_kmod.c
+index d82d022111e0..9cf77628fc91 100644
+--- a/lib/test_kmod.c
++++ b/lib/test_kmod.c
+@@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config)
+ config->test_driver = NULL;
+
+ kfree_const(config->test_fs);
+- config->test_driver = NULL;
++ config->test_fs = NULL;
+ }
+
+ static void kmod_config_free(struct kmod_test_device *test_dev)
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 21d94b5677e8..cb201163666f 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1189,11 +1189,13 @@ static inline int pageblock_free(struct page *page)
+ return PageBuddy(page) && page_order(page) >= pageblock_order;
+ }
+
+-/* Return the start of the next active pageblock after a given page */
+-static struct page *next_active_pageblock(struct page *page)
++/* Return the pfn of the start of the next active pageblock after a given pfn */
++static unsigned long next_active_pageblock(unsigned long pfn)
+ {
++ struct page *page = pfn_to_page(pfn);
++
+ /* Ensure the starting page is pageblock-aligned */
+- BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
++ BUG_ON(pfn & (pageblock_nr_pages - 1));
+
+ /* If the entire pageblock is free, move to the end of free page */
+ if (pageblock_free(page)) {
+@@ -1201,16 +1203,16 @@ static struct page *next_active_pageblock(struct page *page)
+ /* be careful. we don't have locks, page_order can be changed.*/
+ order = page_order(page);
+ if ((order < MAX_ORDER) && (order >= pageblock_order))
+- return page + (1 << order);
++ return pfn + (1 << order);
+ }
+
+- return page + pageblock_nr_pages;
++ return pfn + pageblock_nr_pages;
+ }
+
+-static bool is_pageblock_removable_nolock(struct page *page)
++static bool is_pageblock_removable_nolock(unsigned long pfn)
+ {
++ struct page *page = pfn_to_page(pfn);
+ struct zone *zone;
+- unsigned long pfn;
+
+ /*
+ * We have to be careful here because we are iterating over memory
+@@ -1233,12 +1235,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
+ /* Checks if this range of memory is likely to be hot-removable. */
+ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
+ {
+- struct page *page = pfn_to_page(start_pfn);
+- struct page *end_page = page + nr_pages;
++ unsigned long end_pfn, pfn;
++
++ end_pfn = min(start_pfn + nr_pages,
++ zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
+
+ /* Check the starting page of each pageblock within the range */
+- for (; page < end_page; page = next_active_pageblock(page)) {
+- if (!is_pageblock_removable_nolock(page))
++ for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
++ if (!is_pageblock_removable_nolock(pfn))
+ return false;
+ cond_resched();
+ }
+@@ -1274,6 +1278,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+ i++;
+ if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
+ continue;
++ /* Check if we got outside of the zone */
++ if (zone && !zone_spans_pfn(zone, pfn + i))
++ return 0;
+ page = pfn_to_page(pfn + i);
+ if (zone && page_zone(page) != zone)
+ return 0;
+diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
+index e8090f099eb8..ef0dec20c7d8 100644
+--- a/net/batman-adv/bat_v_elp.c
++++ b/net/batman-adv/bat_v_elp.c
+@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
+
+ ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
+
++ /* free the TID stats immediately */
++ cfg80211_sinfo_release_content(&sinfo);
++
+ dev_put(real_netdev);
+ if (ret == -ENOENT) {
+ /* Node is not associated anymore! It would be
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 5e55cef0cec3..6693e209efe8 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -2293,9 +2293,12 @@ static int compat_do_replace(struct net *net, void __user *user,
+
+ xt_compat_lock(NFPROTO_BRIDGE);
+
+- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
+- if (ret < 0)
+- goto out_unlock;
++ if (tmp.nentries) {
++ ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
++ if (ret < 0)
++ goto out_unlock;
++ }
++
+ ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
+ if (ret < 0)
+ goto out_unlock;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 16350f8c8815..821050957aca 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3927,10 +3927,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
+ /* Only some socketops are supported */
+ switch (optname) {
+ case SO_RCVBUF:
++ val = min_t(u32, val, sysctl_rmem_max);
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+ sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+ break;
+ case SO_SNDBUF:
++ val = min_t(u32, val, sysctl_wmem_max);
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+ break;
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 54d854807630..97fc71d90159 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -545,8 +545,8 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
+ struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
+
+ /* No sk_callback_lock since already detached. */
+- if (psock->parser.enabled)
+- strp_done(&psock->parser.strp);
++ strp_stop(&psock->parser.strp);
++ strp_done(&psock->parser.strp);
+
+ cancel_work_sync(&psock->work);
+
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index d7b43e700023..68a21bf75dd0 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -74,6 +74,33 @@ drop:
+ return 0;
+ }
+
++static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
++ int encap_type)
++{
++ struct ip_tunnel *tunnel;
++ const struct iphdr *iph = ip_hdr(skb);
++ struct net *net = dev_net(skb->dev);
++ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
++
++ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
++ iph->saddr, iph->daddr, 0);
++ if (tunnel) {
++ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
++ goto drop;
++
++ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
++
++ skb->dev = tunnel->dev;
++
++ return xfrm_input(skb, nexthdr, spi, encap_type);
++ }
++
++ return -EINVAL;
++drop:
++ kfree_skb(skb);
++ return 0;
++}
++
+ static int vti_rcv(struct sk_buff *skb)
+ {
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+@@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb)
+ return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
+ }
+
++static int vti_rcv_ipip(struct sk_buff *skb)
++{
++ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
++ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
++
++ return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
++}
++
+ static int vti_rcv_cb(struct sk_buff *skb, int err)
+ {
+ unsigned short family;
+@@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
+ .priority = 100,
+ };
+
++static struct xfrm_tunnel ipip_handler __read_mostly = {
++ .handler = vti_rcv_ipip,
++ .err_handler = vti4_err,
++ .priority = 0,
++};
++
+ static int __net_init vti_init_net(struct net *net)
+ {
+ int err;
+@@ -603,6 +644,13 @@ static int __init vti_init(void)
+ if (err < 0)
+ goto xfrm_proto_comp_failed;
+
++ msg = "ipip tunnel";
++ err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
++ if (err < 0) {
++ pr_info("%s: cant't register tunnel\n",__func__);
++ goto xfrm_tunnel_failed;
++ }
++
+ msg = "netlink interface";
+ err = rtnl_link_register(&vti_link_ops);
+ if (err < 0)
+@@ -612,6 +660,8 @@ static int __init vti_init(void)
+
+ rtnl_link_failed:
+ xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
++xfrm_tunnel_failed:
++ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+ xfrm_proto_comp_failed:
+ xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+ xfrm_proto_ah_failed:
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 432141f04af3..7d6318664eb2 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2220,6 +2220,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user
+ u->tcp_fin_timeout,
+ u->udp_timeout);
+
++#ifdef CONFIG_IP_VS_PROTO_TCP
++ if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
++ u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
++ return -EINVAL;
++ }
++#endif
++
++#ifdef CONFIG_IP_VS_PROTO_UDP
++ if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
++ return -EINVAL;
++#endif
++
+ #ifdef CONFIG_IP_VS_PROTO_TCP
+ if (u->tcp_timeout) {
+ pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
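The ip_vs_set_timeout() hunk validates the user-supplied second counts before any `* HZ` conversion, since values above INT_MAX / HZ (or below zero) overflow the jiffies arithmetic further down. A standalone illustration of the bound, with HZ hard-coded purely for the example:

#include <limits.h>
#include <errno.h>

#define HZ 1000

static int set_timeout(int seconds, int *timeout_jiffies)
{
    if (seconds < 0 || seconds > INT_MAX / HZ)
        return -EINVAL;         /* seconds * HZ would overflow */
    *timeout_jiffies = seconds * HZ;
    return 0;
}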
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index e92e749aff53..830b1328fe97 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ }
+
+ if (nf_ct_key_equal(h, tuple, zone, net)) {
++ /* Tuple is taken already, so caller will need to find
++ * a new source port to use.
++ *
++ * Only exception:
++ * If the *original tuples* are identical, then both
++ * conntracks refer to the same flow.
++ * This is a rare situation; it can occur e.g. when
++ * more than one UDP packet is sent from the same socket
++ * in different threads.
++ *
++ * Let nf_ct_resolve_clash() deal with this later.
++ */
++ if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
++ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
++ continue;
++
+ NF_CT_STAT_INC_ATOMIC(net, found);
+ rcu_read_unlock();
+ return 1;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 5114a0d2a41e..36d4717fee3b 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -116,6 +116,23 @@ static void nft_trans_destroy(struct nft_trans *trans)
+ kfree(trans);
+ }
+
++static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
++{
++ struct net *net = ctx->net;
++ struct nft_trans *trans;
++
++ if (!nft_set_is_anonymous(set))
++ return;
++
++ list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
++ if (trans->msg_type == NFT_MSG_NEWSET &&
++ nft_trans_set(trans) == set) {
++ nft_trans_set_bound(trans) = true;
++ break;
++ }
++ }
++}
++
+ static int nf_tables_register_hook(struct net *net,
+ const struct nft_table *table,
+ struct nft_chain *chain)
+@@ -211,18 +228,6 @@ static int nft_delchain(struct nft_ctx *ctx)
+ return err;
+ }
+
+-/* either expr ops provide both activate/deactivate, or neither */
+-static bool nft_expr_check_ops(const struct nft_expr_ops *ops)
+-{
+- if (!ops)
+- return true;
+-
+- if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate)))
+- return false;
+-
+- return true;
+-}
+-
+ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
+ struct nft_rule *rule)
+ {
+@@ -238,14 +243,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
+ }
+
+ static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
+- struct nft_rule *rule)
++ struct nft_rule *rule,
++ enum nft_trans_phase phase)
+ {
+ struct nft_expr *expr;
+
+ expr = nft_expr_first(rule);
+ while (expr != nft_expr_last(rule) && expr->ops) {
+ if (expr->ops->deactivate)
+- expr->ops->deactivate(ctx, expr);
++ expr->ops->deactivate(ctx, expr, phase);
+
+ expr = nft_expr_next(expr);
+ }
+@@ -296,7 +302,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
+ nft_trans_destroy(trans);
+ return err;
+ }
+- nft_rule_expr_deactivate(ctx, rule);
++ nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);
+
+ return 0;
+ }
+@@ -1932,9 +1938,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
+ */
+ int nft_register_expr(struct nft_expr_type *type)
+ {
+- if (!nft_expr_check_ops(type->ops))
+- return -EINVAL;
+-
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ if (type->family == NFPROTO_UNSPEC)
+ list_add_tail_rcu(&type->list, &nf_tables_expressions);
+@@ -2082,10 +2085,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
+ err = PTR_ERR(ops);
+ goto err1;
+ }
+- if (!nft_expr_check_ops(ops)) {
+- err = -EINVAL;
+- goto err1;
+- }
+ } else
+ ops = type->ops;
+
+@@ -2482,7 +2481,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
+ static void nf_tables_rule_release(const struct nft_ctx *ctx,
+ struct nft_rule *rule)
+ {
+- nft_rule_expr_deactivate(ctx, rule);
++ nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
+ nf_tables_rule_destroy(ctx, rule);
+ }
+
+@@ -3679,39 +3678,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ bind:
+ binding->chain = ctx->chain;
+ list_add_tail_rcu(&binding->list, &set->bindings);
++ nft_set_trans_bind(ctx, set);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(nf_tables_bind_set);
+
+-void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
+- struct nft_set_binding *binding)
+-{
+- if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
+- nft_is_active(ctx->net, set))
+- list_add_tail_rcu(&set->list, &ctx->table->sets);
+-
+- list_add_tail_rcu(&binding->list, &set->bindings);
+-}
+-EXPORT_SYMBOL_GPL(nf_tables_rebind_set);
+-
+ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+- struct nft_set_binding *binding)
++ struct nft_set_binding *binding, bool event)
+ {
+ list_del_rcu(&binding->list);
+
+- if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
+- nft_is_active(ctx->net, set))
++ if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
+ list_del_rcu(&set->list);
++ if (event)
++ nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
++ GFP_KERNEL);
++ }
+ }
+ EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
+
+ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+- if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
+- nft_is_active(ctx->net, set)) {
+- nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
++ if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
+ nft_set_destroy(set);
+- }
+ }
+ EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
+
+@@ -6504,6 +6494,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ nf_tables_rule_notify(&trans->ctx,
+ nft_trans_rule(trans),
+ NFT_MSG_DELRULE);
++ nft_rule_expr_deactivate(&trans->ctx,
++ nft_trans_rule(trans),
++ NFT_TRANS_COMMIT);
+ break;
+ case NFT_MSG_NEWSET:
+ nft_clear(net, nft_trans_set(trans));
+@@ -6590,7 +6583,8 @@ static void nf_tables_abort_release(struct nft_trans *trans)
+ nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+ break;
+ case NFT_MSG_NEWSET:
+- nft_set_destroy(nft_trans_set(trans));
++ if (!nft_trans_set_bound(trans))
++ nft_set_destroy(nft_trans_set(trans));
+ break;
+ case NFT_MSG_NEWSETELEM:
+ nft_set_elem_destroy(nft_trans_elem_set(trans),
+@@ -6651,7 +6645,9 @@ static int __nf_tables_abort(struct net *net)
+ case NFT_MSG_NEWRULE:
+ trans->ctx.chain->use--;
+ list_del_rcu(&nft_trans_rule(trans)->list);
+- nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
++ nft_rule_expr_deactivate(&trans->ctx,
++ nft_trans_rule(trans),
++ NFT_TRANS_ABORT);
+ break;
+ case NFT_MSG_DELRULE:
+ trans->ctx.chain->use++;
+@@ -6661,7 +6657,8 @@ static int __nf_tables_abort(struct net *net)
+ break;
+ case NFT_MSG_NEWSET:
+ trans->ctx.table->use--;
+- list_del_rcu(&nft_trans_set(trans)->list);
++ if (!nft_trans_set_bound(trans))
++ list_del_rcu(&nft_trans_set(trans)->list);
+ break;
+ case NFT_MSG_DELSET:
+ trans->ctx.table->use++;
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index c90a4640723f..0a4bad55a8aa 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -22,11 +22,15 @@
+ #include <linux/netfilter_bridge/ebtables.h>
+ #include <linux/netfilter_arp/arp_tables.h>
+ #include <net/netfilter/nf_tables.h>
++#include <net/netns/generic.h>
+
+ struct nft_xt {
+ struct list_head head;
+ struct nft_expr_ops ops;
+- unsigned int refcnt;
++ refcount_t refcnt;
++
++ /* used only when transaction mutex is locked */
++ unsigned int listcnt;
+
+ /* Unlike other expressions, ops doesn't have static storage duration.
+ * nft core assumes they do. We use kfree_rcu so that nft core can
+@@ -43,10 +47,39 @@ struct nft_xt_match_priv {
+ void *info;
+ };
+
++struct nft_compat_net {
++ struct list_head nft_target_list;
++ struct list_head nft_match_list;
++};
++
++static unsigned int nft_compat_net_id __read_mostly;
++static struct nft_expr_type nft_match_type;
++static struct nft_expr_type nft_target_type;
++
++static struct nft_compat_net *nft_compat_pernet(struct net *net)
++{
++ return net_generic(net, nft_compat_net_id);
++}
++
++static void nft_xt_get(struct nft_xt *xt)
++{
++ /* refcount_inc() warns on 0 -> 1 transition, but we can't
++ * init the reference count to 1 in .select_ops -- we can't
++ * undo such an increase when another expression inside the same
++ * rule fails afterwards.
++ */
++ if (xt->listcnt == 0)
++ refcount_set(&xt->refcnt, 1);
++ else
++ refcount_inc(&xt->refcnt);
++
++ xt->listcnt++;
++}
++
+ static bool nft_xt_put(struct nft_xt *xt)
+ {
+- if (--xt->refcnt == 0) {
+- list_del(&xt->head);
++ if (refcount_dec_and_test(&xt->refcnt)) {
++ WARN_ON_ONCE(!list_empty(&xt->head));
+ kfree_rcu(xt, rcu_head);
+ return true;
+ }
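nft_xt_get() sidesteps refcount_t's debug warning on 0 -> 1 increments: while an expression merely sits on the per-net list its count stays at zero, and the first real user re-arms it with refcount_set() instead of refcount_inc(). A userspace analog of the pattern using C11 atomics (refcount_t itself is kernel-only; names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct obj {
    atomic_int refcnt;    /* analog of refcount_t */
    unsigned int listcnt; /* references tracked under an external lock */
};

/* Mirrors nft_xt_get(): the first user re-arms the count, later users bump it. */
static void obj_get(struct obj *o)
{
    if (o->listcnt == 0)
        atomic_store(&o->refcnt, 1); /* 0 -> 1 via set, not inc */
    else
        atomic_fetch_add(&o->refcnt, 1);
    o->listcnt++;
}

static int obj_put(struct obj *o)
{
    return atomic_fetch_sub(&o->refcnt, 1) == 1; /* true: last reference */
}

int main(void)
{
    struct obj o = { .refcnt = 0, .listcnt = 0 };

    obj_get(&o);
    obj_get(&o);
    printf("put#1 last=%d\n", obj_put(&o));
    printf("put#2 last=%d\n", obj_put(&o));
    return 0;
}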
+@@ -273,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ return -EINVAL;
+
+ nft_xt = container_of(expr->ops, struct nft_xt, ops);
+- nft_xt->refcnt++;
++ nft_xt_get(nft_xt);
+ return 0;
+ }
+
+@@ -487,7 +520,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ return ret;
+
+ nft_xt = container_of(expr->ops, struct nft_xt, ops);
+- nft_xt->refcnt++;
++ nft_xt_get(nft_xt);
+ return 0;
+ }
+
+@@ -541,6 +574,18 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+ __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
+ }
+
++static void nft_compat_deactivate(const struct nft_ctx *ctx,
++ const struct nft_expr *expr,
++ enum nft_trans_phase phase)
++{
++ struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
++
++ if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) {
++ if (--xt->listcnt == 0)
++ list_del_init(&xt->head);
++ }
++}
++
+ static void
+ nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+ {
+@@ -735,10 +780,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
+ .cb = nfnl_nft_compat_cb,
+ };
+
+-static LIST_HEAD(nft_match_list);
+-
+-static struct nft_expr_type nft_match_type;
+-
+ static bool nft_match_cmp(const struct xt_match *match,
+ const char *name, u32 rev, u32 family)
+ {
+@@ -750,6 +791,7 @@ static const struct nft_expr_ops *
+ nft_match_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+ {
++ struct nft_compat_net *cn;
+ struct nft_xt *nft_match;
+ struct xt_match *match;
+ unsigned int matchsize;
+@@ -766,8 +808,10 @@ nft_match_select_ops(const struct nft_ctx *ctx,
+ rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
+ family = ctx->family;
+
++ cn = nft_compat_pernet(ctx->net);
++
+ /* Re-use the existing match if it's already loaded. */
+- list_for_each_entry(nft_match, &nft_match_list, head) {
++ list_for_each_entry(nft_match, &cn->nft_match_list, head) {
+ struct xt_match *match = nft_match->ops.data;
+
+ if (nft_match_cmp(match, mt_name, rev, family))
+@@ -790,11 +834,12 @@ nft_match_select_ops(const struct nft_ctx *ctx,
+ goto err;
+ }
+
+- nft_match->refcnt = 0;
++ refcount_set(&nft_match->refcnt, 0);
+ nft_match->ops.type = &nft_match_type;
+ nft_match->ops.eval = nft_match_eval;
+ nft_match->ops.init = nft_match_init;
+ nft_match->ops.destroy = nft_match_destroy;
++ nft_match->ops.deactivate = nft_compat_deactivate;
+ nft_match->ops.dump = nft_match_dump;
+ nft_match->ops.validate = nft_match_validate;
+ nft_match->ops.data = match;
+@@ -811,7 +856,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
+
+ nft_match->ops.size = matchsize;
+
+- list_add(&nft_match->head, &nft_match_list);
++ nft_match->listcnt = 0;
++ list_add(&nft_match->head, &cn->nft_match_list);
+
+ return &nft_match->ops;
+ err:
+@@ -827,10 +873,6 @@ static struct nft_expr_type nft_match_type __read_mostly = {
+ .owner = THIS_MODULE,
+ };
+
+-static LIST_HEAD(nft_target_list);
+-
+-static struct nft_expr_type nft_target_type;
+-
+ static bool nft_target_cmp(const struct xt_target *tg,
+ const char *name, u32 rev, u32 family)
+ {
+@@ -842,6 +884,7 @@ static const struct nft_expr_ops *
+ nft_target_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+ {
++ struct nft_compat_net *cn;
+ struct nft_xt *nft_target;
+ struct xt_target *target;
+ char *tg_name;
+@@ -862,8 +905,9 @@ nft_target_select_ops(const struct nft_ctx *ctx,
+ strcmp(tg_name, "standard") == 0)
+ return ERR_PTR(-EINVAL);
+
++ cn = nft_compat_pernet(ctx->net);
+ /* Re-use the existing target if it's already loaded. */
+- list_for_each_entry(nft_target, &nft_target_list, head) {
++ list_for_each_entry(nft_target, &cn->nft_target_list, head) {
+ struct xt_target *target = nft_target->ops.data;
+
+ if (!target->target)
+@@ -894,11 +938,12 @@ nft_target_select_ops(const struct nft_ctx *ctx,
+ goto err;
+ }
+
+- nft_target->refcnt = 0;
++ refcount_set(&nft_target->refcnt, 0);
+ nft_target->ops.type = &nft_target_type;
+ nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
+ nft_target->ops.init = nft_target_init;
+ nft_target->ops.destroy = nft_target_destroy;
++ nft_target->ops.deactivate = nft_compat_deactivate;
+ nft_target->ops.dump = nft_target_dump;
+ nft_target->ops.validate = nft_target_validate;
+ nft_target->ops.data = target;
+@@ -908,7 +953,8 @@ nft_target_select_ops(const struct nft_ctx *ctx,
+ else
+ nft_target->ops.eval = nft_target_eval_xt;
+
+- list_add(&nft_target->head, &nft_target_list);
++ nft_target->listcnt = 0;
++ list_add(&nft_target->head, &cn->nft_target_list);
+
+ return &nft_target->ops;
+ err:
+@@ -924,13 +970,74 @@ static struct nft_expr_type nft_target_type __read_mostly = {
+ .owner = THIS_MODULE,
+ };
+
++static int __net_init nft_compat_init_net(struct net *net)
++{
++ struct nft_compat_net *cn = nft_compat_pernet(net);
++
++ INIT_LIST_HEAD(&cn->nft_target_list);
++ INIT_LIST_HEAD(&cn->nft_match_list);
++
++ return 0;
++}
++
++static void __net_exit nft_compat_exit_net(struct net *net)
++{
++ struct nft_compat_net *cn = nft_compat_pernet(net);
++ struct nft_xt *xt, *next;
++
++ if (list_empty(&cn->nft_match_list) &&
++ list_empty(&cn->nft_target_list))
++ return;
++
++ /* If there was an error that caused nft_xt expr to not be initialized
++ * fully and noone else requested the same expression later, the lists
++ * contain 0-refcount entries that still hold module reference.
++ *
++ * Clean them here.
++ */
++ mutex_lock(&net->nft.commit_mutex);
++ list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) {
++ struct xt_target *target = xt->ops.data;
++
++ list_del_init(&xt->head);
++
++ if (refcount_read(&xt->refcnt))
++ continue;
++ module_put(target->me);
++ kfree(xt);
++ }
++
++ list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) {
++ struct xt_match *match = xt->ops.data;
++
++ list_del_init(&xt->head);
++
++ if (refcount_read(&xt->refcnt))
++ continue;
++ module_put(match->me);
++ kfree(xt);
++ }
++ mutex_unlock(&net->nft.commit_mutex);
++}
++
++static struct pernet_operations nft_compat_net_ops = {
++ .init = nft_compat_init_net,
++ .exit = nft_compat_exit_net,
++ .id = &nft_compat_net_id,
++ .size = sizeof(struct nft_compat_net),
++};
++
+ static int __init nft_compat_module_init(void)
+ {
+ int ret;
+
++ ret = register_pernet_subsys(&nft_compat_net_ops);
++ if (ret < 0)
++ goto err_target;
++
+ ret = nft_register_expr(&nft_match_type);
+ if (ret < 0)
+- return ret;
++ goto err_pernet;
+
+ ret = nft_register_expr(&nft_target_type);
+ if (ret < 0)
+@@ -943,45 +1050,21 @@ static int __init nft_compat_module_init(void)
+ }
+
+ return ret;
+-
+ err_target:
+ nft_unregister_expr(&nft_target_type);
+ err_match:
+ nft_unregister_expr(&nft_match_type);
++err_pernet:
++ unregister_pernet_subsys(&nft_compat_net_ops);
+ return ret;
+ }
+
+ static void __exit nft_compat_module_exit(void)
+ {
+- struct nft_xt *xt, *next;
+-
+- /* list should be empty here, it can be non-empty only in case there
+- * was an error that caused nft_xt expr to not be initialized fully
+- * and noone else requested the same expression later.
+- *
+- * In this case, the lists contain 0-refcount entries that still
+- * hold module reference.
+- */
+- list_for_each_entry_safe(xt, next, &nft_target_list, head) {
+- struct xt_target *target = xt->ops.data;
+-
+- if (WARN_ON_ONCE(xt->refcnt))
+- continue;
+- module_put(target->me);
+- kfree(xt);
+- }
+-
+- list_for_each_entry_safe(xt, next, &nft_match_list, head) {
+- struct xt_match *match = xt->ops.data;
+-
+- if (WARN_ON_ONCE(xt->refcnt))
+- continue;
+- module_put(match->me);
+- kfree(xt);
+- }
+ nfnetlink_subsys_unregister(&nfnl_compat_subsys);
+ nft_unregister_expr(&nft_target_type);
+ nft_unregister_expr(&nft_match_type);
++ unregister_pernet_subsys(&nft_compat_net_ops);
+ }
+
+ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
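The net effect of this file's changes is that the match and target caches move from module-global lists into per-network-namespace storage registered via register_pernet_subsys(), so each namespace allocates its lists at creation and drains them at teardown. A loose userspace analog of that ownership change (the singly linked lists and all names are invented for the sketch):

#include <stdio.h>
#include <stdlib.h>

struct entry {
    struct entry *next;
    int id;
};

struct compat_ctx {           /* analog of struct nft_compat_net */
    struct entry *match_list;
    struct entry *target_list;
};

static struct compat_ctx *ctx_init(void)  /* analog of nft_compat_init_net() */
{
    return calloc(1, sizeof(struct compat_ctx));
}

static void drain(struct entry *head)
{
    while (head) {
        struct entry *next = head->next;
        free(head);
        head = next;
    }
}

static void ctx_exit(struct compat_ctx *ctx) /* analog of nft_compat_exit_net() */
{
    drain(ctx->match_list);
    drain(ctx->target_list);
    free(ctx);
}

int main(void)
{
    struct compat_ctx *ns1 = ctx_init(), *ns2 = ctx_init();
    struct entry *e = calloc(1, sizeof(*e));

    e->id = 1;
    e->next = ns1->match_list;
    ns1->match_list = e;       /* ns2's lists stay empty and isolated */

    printf("ns2 match list empty: %d\n", ns2->match_list == NULL);
    ctx_exit(ns1);
    ctx_exit(ns2);
    return 0;
}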
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 07d4efd3d851..f1172f99752b 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -235,20 +235,17 @@ err1:
+ return err;
+ }
+
+-static void nft_dynset_activate(const struct nft_ctx *ctx,
+- const struct nft_expr *expr)
+-{
+- struct nft_dynset *priv = nft_expr_priv(expr);
+-
+- nf_tables_rebind_set(ctx, priv->set, &priv->binding);
+-}
+-
+ static void nft_dynset_deactivate(const struct nft_ctx *ctx,
+- const struct nft_expr *expr)
++ const struct nft_expr *expr,
++ enum nft_trans_phase phase)
+ {
+ struct nft_dynset *priv = nft_expr_priv(expr);
+
+- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
++ if (phase == NFT_TRANS_PREPARE)
++ return;
++
++ nf_tables_unbind_set(ctx, priv->set, &priv->binding,
++ phase == NFT_TRANS_COMMIT);
+ }
+
+ static void nft_dynset_destroy(const struct nft_ctx *ctx,
+@@ -296,7 +293,6 @@ static const struct nft_expr_ops nft_dynset_ops = {
+ .eval = nft_dynset_eval,
+ .init = nft_dynset_init,
+ .destroy = nft_dynset_destroy,
+- .activate = nft_dynset_activate,
+ .deactivate = nft_dynset_deactivate,
+ .dump = nft_dynset_dump,
+ };
+diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
+index 0777a93211e2..3f6d1d2a6281 100644
+--- a/net/netfilter/nft_immediate.c
++++ b/net/netfilter/nft_immediate.c
+@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
+ }
+
+ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
+- const struct nft_expr *expr)
++ const struct nft_expr *expr,
++ enum nft_trans_phase phase)
+ {
+ const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
++ if (phase == NFT_TRANS_COMMIT)
++ return;
++
+ return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
+ }
+
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 227b2b15a19c..14496da5141d 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
+ return 0;
+ }
+
+-static void nft_lookup_activate(const struct nft_ctx *ctx,
+- const struct nft_expr *expr)
+-{
+- struct nft_lookup *priv = nft_expr_priv(expr);
+-
+- nf_tables_rebind_set(ctx, priv->set, &priv->binding);
+-}
+-
+ static void nft_lookup_deactivate(const struct nft_ctx *ctx,
+- const struct nft_expr *expr)
++ const struct nft_expr *expr,
++ enum nft_trans_phase phase)
+ {
+ struct nft_lookup *priv = nft_expr_priv(expr);
+
+- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
++ if (phase == NFT_TRANS_PREPARE)
++ return;
++
++ nf_tables_unbind_set(ctx, priv->set, &priv->binding,
++ phase == NFT_TRANS_COMMIT);
+ }
+
+ static void nft_lookup_destroy(const struct nft_ctx *ctx,
+@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = {
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
+ .eval = nft_lookup_eval,
+ .init = nft_lookup_init,
+- .activate = nft_lookup_activate,
+ .deactivate = nft_lookup_deactivate,
+ .destroy = nft_lookup_destroy,
+ .dump = nft_lookup_dump,
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index a3185ca2a3a9..ae178e914486 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -155,20 +155,17 @@ nla_put_failure:
+ return -1;
+ }
+
+-static void nft_objref_map_activate(const struct nft_ctx *ctx,
+- const struct nft_expr *expr)
+-{
+- struct nft_objref_map *priv = nft_expr_priv(expr);
+-
+- nf_tables_rebind_set(ctx, priv->set, &priv->binding);
+-}
+-
+ static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
+- const struct nft_expr *expr)
++ const struct nft_expr *expr,
++ enum nft_trans_phase phase)
+ {
+ struct nft_objref_map *priv = nft_expr_priv(expr);
+
+- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
++ if (phase == NFT_TRANS_PREPARE)
++ return;
++
++ nf_tables_unbind_set(ctx, priv->set, &priv->binding,
++ phase == NFT_TRANS_COMMIT);
+ }
+
+ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
+@@ -185,7 +182,6 @@ static const struct nft_expr_ops nft_objref_map_ops = {
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
+ .eval = nft_objref_map_eval,
+ .init = nft_objref_map_init,
+- .activate = nft_objref_map_activate,
+ .deactivate = nft_objref_map_deactivate,
+ .destroy = nft_objref_map_destroy,
+ .dump = nft_objref_map_dump,
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index 5d3cce9e8744..15eb5d3d4750 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
+ {
+ struct virtio_vsock *vsock = virtio_vsock_get();
+
++ if (!vsock)
++ return VMADDR_CID_ANY;
++
+ return vsock->guest_cid;
+ }
+
+@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+
+ virtio_vsock_update_guest_cid(vsock);
+
+- ret = vsock_core_init(&virtio_transport.transport);
+- if (ret < 0)
+- goto out_vqs;
+-
+ vsock->rx_buf_nr = 0;
+ vsock->rx_buf_max_nr = 0;
+ atomic_set(&vsock->queued_replies, 0);
+@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ mutex_unlock(&the_virtio_vsock_mutex);
+ return 0;
+
+-out_vqs:
+- vsock->vdev->config->del_vqs(vsock->vdev);
+ out:
+ kfree(vsock);
+ mutex_unlock(&the_virtio_vsock_mutex);
+@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
+ flush_work(&vsock->event_work);
+ flush_work(&vsock->send_pkt_work);
+
++ /* Reset all connected sockets when the device disappears */
++ vsock_for_each_connected_socket(virtio_vsock_reset_sock);
++
+ vdev->config->reset(vdev);
+
+ mutex_lock(&vsock->rx_lock);
+@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
+
+ mutex_lock(&the_virtio_vsock_mutex);
+ the_virtio_vsock = NULL;
+- vsock_core_exit();
+ mutex_unlock(&the_virtio_vsock_mutex);
+
+ vdev->config->del_vqs(vdev);
+@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void)
+ virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
+ if (!virtio_vsock_workqueue)
+ return -ENOMEM;
++
+ ret = register_virtio_driver(&virtio_vsock_driver);
+ if (ret)
+- destroy_workqueue(virtio_vsock_workqueue);
++ goto out_wq;
++
++ ret = vsock_core_init(&virtio_transport.transport);
++ if (ret)
++ goto out_vdr;
++
++ return 0;
++
++out_vdr:
++ unregister_virtio_driver(&virtio_vsock_driver);
++out_wq:
++ destroy_workqueue(virtio_vsock_workqueue);
+ return ret;
++
+ }
+
+ static void __exit virtio_vsock_exit(void)
+ {
++ vsock_core_exit();
+ unregister_virtio_driver(&virtio_vsock_driver);
+ destroy_workqueue(virtio_vsock_workqueue);
+ }
+diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
+index 08c88de0ffda..11975ec8d566 100644
+--- a/security/apparmor/domain.c
++++ b/security/apparmor/domain.c
+@@ -1444,7 +1444,10 @@ check:
+ new = aa_label_merge(label, target, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(new)) {
+ info = "failed to build target label";
+- error = PTR_ERR(new);
++ if (!new)
++ error = -ENOMEM;
++ else
++ error = PTR_ERR(new);
+ new = NULL;
+ perms.allow = 0;
+ goto audit;
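The fix above works around a real pitfall: IS_ERR_OR_NULL() is true for NULL, but PTR_ERR(NULL) evaluates to 0, so the old code could turn an allocation failure into a "success" return. The kernel macros re-created in userspace to show the trap (MAX_ERRNO and the encodings below mirror include/linux/err.h):

#include <errno.h>
#include <stdio.h>

/* Minimal userspace re-creations of the kernel's err-pointer helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *p) { return !p || IS_ERR(p); }

int main(void)
{
    void *oom = NULL;              /* allocation failure */
    void *err = ERR_PTR(-EINVAL);  /* explicit error pointer */

    if (IS_ERR_OR_NULL(oom))
        printf("NULL: PTR_ERR() = %ld (0 reads as 'success'!)\n",
               PTR_ERR(oom));
    if (IS_ERR_OR_NULL(err))
        printf("ERR_PTR(-EINVAL): PTR_ERR() = %ld\n", PTR_ERR(err));
    return 0;
}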
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index 42446a216f3b..7d1eeb084968 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -1598,12 +1598,14 @@ static unsigned int apparmor_ipv4_postroute(void *priv,
+ return apparmor_ip_postroute(priv, skb, state);
+ }
+
++#if IS_ENABLED(CONFIG_IPV6)
+ static unsigned int apparmor_ipv6_postroute(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+ {
+ return apparmor_ip_postroute(priv, skb, state);
+ }
++#endif
+
+ static const struct nf_hook_ops apparmor_nf_ops[] = {
+ {
+diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
+index 9988d5c126b6..94094168c4a6 100644
+--- a/tools/bpf/bpftool/map.c
++++ b/tools/bpf/bpftool/map.c
+@@ -439,6 +439,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val,
+ return argv + i;
+ }
+
++/* on per-cpu maps we must copy the provided value to all value instances */
++static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
++{
++ unsigned int i, n, step;
++
++ if (!map_is_per_cpu(info->type))
++ return;
++
++ n = get_possible_cpus();
++ step = round_up(info->value_size, 8);
++ for (i = 1; i < n; i++)
++ memcpy(value + i * step, value, info->value_size);
++}
++
+ static int parse_elem(char **argv, struct bpf_map_info *info,
+ void *key, void *value, __u32 key_size, __u32 value_size,
+ __u32 *flags, __u32 **value_fd)
+@@ -518,6 +532,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
+ argv = parse_bytes(argv, "value", value, value_size);
+ if (!argv)
+ return -1;
++
++ fill_per_cpu_value(info, value);
+ }
+
+ return parse_elem(argv, info, key, NULL, key_size, value_size,
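Per-cpu BPF map values are laid out as one slot per possible CPU, each slot padded to an 8-byte boundary, which is exactly the stride fill_per_cpu_value() uses above. A standalone sketch of the replication, with the CPU count and value size hardcoded for the demo:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same rounding the kernel applies to per-cpu map value slots. */
static size_t round_up8(size_t n)
{
    return (n + 7) & ~(size_t)7;
}

int main(void)
{
    unsigned int ncpus = 4;      /* stand-in for get_possible_cpus() */
    size_t value_size = 5;       /* deliberately not 8-aligned */
    size_t step = round_up8(value_size);
    unsigned char *value = calloc(ncpus, step);

    memcpy(value, "\x01\x02\x03\x04\x05", value_size);

    /* Copy slot 0 into slots 1..ncpus-1, like fill_per_cpu_value(). */
    for (unsigned int i = 1; i < ncpus; i++)
        memcpy(value + i * step, value, value_size);

    for (unsigned int i = 0; i < ncpus; i++)
        printf("cpu%u first byte: %u\n", i, (unsigned)value[i * step]);
    free(value);
    return 0;
}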
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index 69b01a6158bd..91b9de5f4e17 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -130,13 +130,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
+
+ static int prog_fd_by_tag(unsigned char *tag)
+ {
+- struct bpf_prog_info info = {};
+- __u32 len = sizeof(info);
+ unsigned int id = 0;
+ int err;
+ int fd;
+
+ while (true) {
++ struct bpf_prog_info info = {};
++ __u32 len = sizeof(info);
++
+ err = bpf_prog_get_next_id(id, &id);
+ if (err) {
+ p_err("%s", strerror(errno));
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index a7b4d3f611c5..d8791e0e5f75 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -1633,13 +1633,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
+ .force_header = false,
+ };
+ struct perf_evsel *ev2;
+- static bool init;
+ u64 val;
+
+- if (!init) {
+- perf_stat__init_shadow_stats();
+- init = true;
+- }
+ if (!evsel->stats)
+ perf_evlist__alloc_stats(script->session->evlist, false);
+ if (evsel_script(evsel->leader)->gnum++ == 0)
+@@ -1746,7 +1741,7 @@ static void process_event(struct perf_script *script,
+ return;
+ }
+
+- if (PRINT_FIELD(TRACE)) {
++ if (PRINT_FIELD(TRACE) && sample->raw_data) {
+ event_format__fprintf(evsel->tp_format, sample->cpu,
+ sample->raw_data, sample->raw_size, fp);
+ }
+@@ -2305,6 +2300,8 @@ static int __cmd_script(struct perf_script *script)
+
+ signal(SIGINT, sig_handler);
+
++ perf_stat__init_shadow_stats();
++
+ /* override event processing functions */
+ if (script->show_task_events) {
+ script->tool.comm = process_comm_event;
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index 835619476370..c108519ddd61 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -2424,19 +2424,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
+
+ static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
+ {
+- struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
++ bool found = false;
++ struct perf_evsel *evsel, *tmp;
++ struct parse_events_error err = { .idx = 0, };
++ int ret = parse_events(evlist, "probe:vfs_getname*", &err);
+
+- if (IS_ERR(evsel))
++ if (ret)
+ return false;
+
+- if (perf_evsel__field(evsel, "pathname") == NULL) {
++ evlist__for_each_entry_safe(evlist, evsel, tmp) {
++ if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
++ continue;
++
++ if (perf_evsel__field(evsel, "pathname")) {
++ evsel->handler = trace__vfs_getname;
++ found = true;
++ continue;
++ }
++
++ list_del_init(&evsel->node);
++ evsel->evlist = NULL;
+ perf_evsel__delete(evsel);
+- return false;
+ }
+
+- evsel->handler = trace__vfs_getname;
+- perf_evlist__add(evlist, evsel);
+- return true;
++ return found;
+ }
+
+ static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
+diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
+index 1ccbd3342069..383674f448fc 100644
+--- a/tools/perf/util/cpumap.c
++++ b/tools/perf/util/cpumap.c
+@@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
+ if (!cpu_list)
+ return cpu_map__read_all_cpu_map();
+
+- if (!isdigit(*cpu_list))
++ /*
++ * must handle the case of empty cpumap to cover
++ * TOPOLOGY header for NUMA nodes with no CPU
++ * (e.g., because of CPU hotplug)
++ */
++ if (!isdigit(*cpu_list) && *cpu_list != '\0')
+ goto out;
+
+ while (isdigit(*cpu_list)) {
+@@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
+
+ if (nr_cpus > 0)
+ cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+- else
++ else if (*cpu_list != '\0')
+ cpus = cpu_map__default_new();
++ else
++ cpus = cpu_map__dummy_new();
+ invalid:
+ free(tmp_cpus);
+ out:
+diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
+index 1904e7f6ec84..77126a5fa9b6 100644
+--- a/tools/perf/util/ordered-events.c
++++ b/tools/perf/util/ordered-events.c
+@@ -359,8 +359,10 @@ void ordered_events__free(struct ordered_events *oe)
+ * Current buffer might not have all the events allocated
+ * yet, we need to free only allocated ones ...
+ */
+- list_del(&oe->buffer->list);
+- ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
++ if (oe->buffer) {
++ list_del(&oe->buffer->list);
++ ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
++ }
+
+ /* ... and continue with the rest */
+ list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
+diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
+index 63f758c655d5..64d1f36dee99 100644
+--- a/tools/perf/util/setup.py
++++ b/tools/perf/util/setup.py
+@@ -17,6 +17,8 @@ if cc == "clang":
+ vars[var] = sub("-mcet", "", vars[var])
+ if not clang_has_option("-fcf-protection"):
+ vars[var] = sub("-fcf-protection", "", vars[var])
++ if not clang_has_option("-fstack-clash-protection"):
++ vars[var] = sub("-fstack-clash-protection", "", vars[var])
+
+ from distutils.core import setup, Extension
+
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 66a84d5846c8..03cb8c6d620a 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -87,6 +87,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+ return GELF_ST_TYPE(sym->st_info);
+ }
+
++static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
++{
++ return GELF_ST_VISIBILITY(sym->st_other);
++}
++
+ #ifndef STT_GNU_IFUNC
+ #define STT_GNU_IFUNC 10
+ #endif
+@@ -111,7 +116,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
+ return elf_sym__type(sym) == STT_NOTYPE &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF &&
+- sym->st_shndx != SHN_ABS;
++ sym->st_shndx != SHN_ABS &&
++ elf_sym__visibility(sym) != STV_HIDDEN &&
++ elf_sym__visibility(sym) != STV_INTERNAL;
+ }
+
+ static bool elf_sym__filter(GElf_Sym *sym)
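ELF symbol visibility is encoded in the low two bits of st_other, which GELF_ST_VISIBILITY() extracts; the hunk filters STV_HIDDEN and STV_INTERNAL symbols out of the label set. A self-contained check of the encoding using the equivalent elf.h macro:

#include <elf.h>
#include <stdio.h>

static const char *vis_name(unsigned char st_other)
{
    switch (ELF64_ST_VISIBILITY(st_other)) { /* low 2 bits of st_other */
    case STV_DEFAULT:   return "default";
    case STV_INTERNAL:  return "internal (filtered)";
    case STV_HIDDEN:    return "hidden (filtered)";
    case STV_PROTECTED: return "protected";
    }
    return "?";
}

int main(void)
{
    unsigned char samples[] = { STV_DEFAULT, STV_INTERNAL,
                                STV_HIDDEN, STV_PROTECTED };
    for (unsigned int i = 0; i < sizeof(samples); i++)
        printf("st_other=%u -> %s\n", samples[i], vis_name(samples[i]));
    return 0;
}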
+diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
+index 315a44fa32af..84fd6f1bf33e 100644
+--- a/tools/testing/selftests/bpf/bpf_util.h
++++ b/tools/testing/selftests/bpf/bpf_util.h
+@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
+ unsigned int start, end, possible_cpus = 0;
+ char buff[128];
+ FILE *fp;
+- int n;
++ int len, n, i, j = 0;
+
+ fp = fopen(fcpu, "r");
+ if (!fp) {
+@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void)
+ exit(1);
+ }
+
+- while (fgets(buff, sizeof(buff), fp)) {
+- n = sscanf(buff, "%u-%u", &start, &end);
+- if (n == 0) {
+- printf("Failed to retrieve # possible CPUs!\n");
+- exit(1);
+- } else if (n == 1) {
+- end = start;
++ if (!fgets(buff, sizeof(buff), fp)) {
++ printf("Failed to read %s!\n", fcpu);
++ exit(1);
++ }
++
++ len = strlen(buff);
++ for (i = 0; i <= len; i++) {
++ if (buff[i] == ',' || buff[i] == '\0') {
++ buff[i] = '\0';
++ n = sscanf(&buff[j], "%u-%u", &start, &end);
++ if (n <= 0) {
++ printf("Failed to retrieve # possible CPUs!\n");
++ exit(1);
++ } else if (n == 1) {
++ end = start;
++ }
++ possible_cpus += end - start + 1;
++ j = i + 1;
+ }
+- possible_cpus = start == 0 ? end + 1 : 0;
+- break;
+ }
++
+ fclose(fp);
+
+ return possible_cpus;
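The rewritten parser accepts the general /sys/devices/system/cpu/possible syntax, comma-separated ranges such as "0-3,5,8-9", rather than a single N-M pair. The same scan extracted into a standalone program:

#include <stdio.h>
#include <string.h>

/* Counts CPUs in a possible-mask string such as "0-3,5,8-9". */
static unsigned int count_cpus(char *buff)
{
    unsigned int start, end, possible = 0;
    size_t len = strlen(buff);
    int n, j = 0;

    for (size_t i = 0; i <= len; i++) {
        if (buff[i] == ',' || buff[i] == '\0') {
            buff[i] = '\0';
            n = sscanf(&buff[j], "%u-%u", &start, &end);
            if (n <= 0)
                return 0;       /* malformed mask */
            if (n == 1)
                end = start;    /* single CPU, not a range */
            possible += end - start + 1;
            j = i + 1;
        }
    }
    return possible;
}

int main(void)
{
    char mask[] = "0-3,5,8-9";
    printf("%s -> %u possible CPUs\n", mask, count_cpus(mask)); /* 7 */
    return 0;
}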
+diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
+index bab13dd025a6..0d26b5e3f966 100755
+--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
++++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
+@@ -37,6 +37,10 @@ prerequisite()
+ exit $ksft_skip
+ fi
+
++ present_cpus=`cat $SYSFS/devices/system/cpu/present`
++ present_max=${present_cpus##*-}
++ echo "present_cpus = $present_cpus present_max = $present_max"
++
+ echo -e "\t Cpus in online state: $online_cpus"
+
+ offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
+@@ -151,6 +155,8 @@ online_cpus=0
+ online_max=0
+ offline_cpus=0
+ offline_max=0
++present_cpus=0
++present_max=0
+
+ while getopts e:ahp: opt; do
+ case $opt in
+@@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then
+ online_cpu_expect_success $online_max
+
+ if [[ $offline_cpus -gt 0 ]]; then
+- echo -e "\t offline to online to offline: cpu $offline_max"
+- online_cpu_expect_success $offline_max
+- offline_cpu_expect_success $offline_max
++ echo -e "\t offline to online to offline: cpu $present_max"
++ online_cpu_expect_success $present_max
++ offline_cpu_expect_success $present_max
++ online_cpu $present_max
+ fi
+ exit 0
+ else
+diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
+index 923570a9708a..68e2295e7589 100644
+--- a/tools/testing/selftests/net/Makefile
++++ b/tools/testing/selftests/net/Makefile
+@@ -19,6 +19,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
+ KSFT_KHDR_INSTALL := 1
+ include ../lib.mk
+
+-$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
++$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
+ $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
+ $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index 47ed6cef93fb..c9ff2b47bd1c 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for netfilter selftests
+
+-TEST_PROGS := nft_trans_stress.sh
++TEST_PROGS := nft_trans_stress.sh nft_nat.sh
+
+ include ../lib.mk
+diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
+index 1017313e41a8..59caa8f71cd8 100644
+--- a/tools/testing/selftests/netfilter/config
++++ b/tools/testing/selftests/netfilter/config
+@@ -1,2 +1,2 @@
+ CONFIG_NET_NS=y
+-NF_TABLES_INET=y
++CONFIG_NF_TABLES_INET=y
+diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
+new file mode 100755
+index 000000000000..8ec76681605c
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/nft_nat.sh
+@@ -0,0 +1,762 @@
++#!/bin/bash
++#
++# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
++#
++
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++ret=0
++
++nft --version > /dev/null 2>&1
++if [ $? -ne 0 ];then
++ echo "SKIP: Could not run test without nft tool"
++ exit $ksft_skip
++fi
++
++ip -Version > /dev/null 2>&1
++if [ $? -ne 0 ];then
++ echo "SKIP: Could not run test without ip tool"
++ exit $ksft_skip
++fi
++
++ip netns add ns0
++ip netns add ns1
++ip netns add ns2
++
++ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
++ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
++
++ip -net ns0 link set lo up
++ip -net ns0 link set veth0 up
++ip -net ns0 addr add 10.0.1.1/24 dev veth0
++ip -net ns0 addr add dead:1::1/64 dev veth0
++
++ip -net ns0 link set veth1 up
++ip -net ns0 addr add 10.0.2.1/24 dev veth1
++ip -net ns0 addr add dead:2::1/64 dev veth1
++
++for i in 1 2; do
++ ip -net ns$i link set lo up
++ ip -net ns$i link set eth0 up
++ ip -net ns$i addr add 10.0.$i.99/24 dev eth0
++ ip -net ns$i route add default via 10.0.$i.1
++ ip -net ns$i addr add dead:$i::99/64 dev eth0
++ ip -net ns$i route add default via dead:$i::1
++done
++
++bad_counter()
++{
++ local ns=$1
++ local counter=$2
++ local expect=$3
++
++ echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
++ ip netns exec $ns nft list counter inet filter $counter 1>&2
++}
++
++check_counters()
++{
++ ns=$1
++ local lret=0
++
++ cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
++ if [ $? -ne 0 ]; then
++ bad_counter $ns ns0in "packets 1 bytes 84"
++ lret=1
++ fi
++ cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
++ if [ $? -ne 0 ]; then
++ bad_counter $ns ns0out "packets 1 bytes 84"
++ lret=1
++ fi
++
++ expect="packets 1 bytes 104"
++ cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter $ns ns0in6 "$expect"
++ lret=1
++ fi
++ cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter $ns ns0out6 "$expect"
++ lret=1
++ fi
++
++ return $lret
++}
++
++check_ns0_counters()
++{
++ local ns=$1
++ local lret=0
++
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ns0in "packets 0 bytes 0"
++ lret=1
++ fi
++
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ns0in6 "packets 0 bytes 0"
++ lret=1
++ fi
++
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ns0out "packets 0 bytes 0"
++ lret=1
++ fi
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ns0out6 "packets 0 bytes 0"
++ lret=1
++ fi
++
++ for dir in "in" "out" ; do
++ expect="packets 1 bytes 84"
++ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 $ns$dir "$expect"
++ lret=1
++ fi
++
++ expect="packets 1 bytes 104"
++ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ${ns}${dir}6 "$expect"
++ lret=1
++ fi
++ done
++
++ return $lret
++}
++
++reset_counters()
++{
++ for i in 0 1 2;do
++ ip netns exec ns$i nft reset counters inet > /dev/null
++ done
++}
++
++test_local_dnat6()
++{
++ local lret=0
++ip netns exec ns0 nft -f - <<EOF
++table ip6 nat {
++ chain output {
++ type nat hook output priority 0; policy accept;
++ ip6 daddr dead:1::99 dnat to dead:2::99
++ }
++}
++EOF
++ if [ $? -ne 0 ]; then
++ echo "SKIP: Could not add add ip6 dnat hook"
++ return $ksft_skip
++ fi
++
++ # ping netns1, expect rewrite to netns2
++ ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
++ if [ $? -ne 0 ]; then
++ lret=1
++ echo "ERROR: ping6 failed"
++ return $lret
++ fi
++
++ expect="packets 0 bytes 0"
++ for dir in "in6" "out6" ; do
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++
++ expect="packets 1 bytes 104"
++ for dir in "in6" "out6" ; do
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ns2$dir "$expect"
++ lret=1
++ fi
++ done
++
++ # expect 0 count in ns1
++ expect="packets 0 bytes 0"
++ for dir in "in6" "out6" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns0$dir "$expect"
++ lret=1
++ fi
++ done
++
++ # expect 1 packet in ns2
++ expect="packets 1 bytes 104"
++ for dir in "in6" "out6" ; do
++ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns0$dir "$expect"
++ lret=1
++ fi
++ done
++
++ test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
++ ip netns exec ns0 nft flush chain ip6 nat output
++
++ return $lret
++}
++
++test_local_dnat()
++{
++ local lret=0
++ip netns exec ns0 nft -f - <<EOF
++table ip nat {
++ chain output {
++ type nat hook output priority 0; policy accept;
++ ip daddr 10.0.1.99 dnat to 10.0.2.99
++ }
++}
++EOF
++ # ping netns1, expect rewrite to netns2
++ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
++ if [ $? -ne 0 ]; then
++ lret=1
++ echo "ERROR: ping failed"
++ return $lret
++ fi
++
++ expect="packets 0 bytes 0"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++
++ expect="packets 1 bytes 84"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ns2$dir "$expect"
++ lret=1
++ fi
++ done
++
++ # expect 0 count in ns1
++ expect="packets 0 bytes 0"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns0$dir "$expect"
++ lret=1
++ fi
++ done
++
++ # expect 1 packet in ns2
++ expect="packets 1 bytes 84"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns0$dir "$expect"
++ lret=1
++ fi
++ done
++
++ test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
++
++ ip netns exec ns0 nft flush chain ip nat output
++
++ reset_counters
++ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
++ if [ $? -ne 0 ]; then
++ lret=1
++ echo "ERROR: ping failed"
++ return $lret
++ fi
++
++ expect="packets 1 bytes 84"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++ expect="packets 0 bytes 0"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ns2$dir "$expect"
++ lret=1
++ fi
++ done
++
++ # expect 1 count in ns1
++ expect="packets 1 bytes 84"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns0 ns0$dir "$expect"
++ lret=1
++ fi
++ done
++
++ # expect 0 packet in ns2
++ expect="packets 0 bytes 0"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns2$dir "$expect"
++ lret=1
++ fi
++ done
++
++ test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
++
++ return $lret
++}
++
++
++test_masquerade6()
++{
++ local lret=0
++
++ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
++
++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
++ if [ $? -ne 0 ] ; then
++ echo "ERROR: cannot ping ns1 from ns2 via ipv6"
++ return 1
++ fi
++
++ expect="packets 1 bytes 104"
++ for dir in "in6" "out6" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns2$dir "$expect"
++ lret=1
++ fi
++
++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++
++ reset_counters
++
++# add masquerading rule
++ip netns exec ns0 nft -f - <<EOF
++table ip6 nat {
++ chain postrouting {
++ type nat hook postrouting priority 0; policy accept;
++ meta oif veth0 masquerade
++ }
++}
++EOF
++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
++ if [ $? -ne 0 ] ; then
++ echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
++ lret=1
++ fi
++
++ # ns1 should have seen packets from ns0, due to masquerade
++ expect="packets 1 bytes 104"
++ for dir in "in6" "out6" ; do
++
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns0$dir "$expect"
++ lret=1
++ fi
++
++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++
++ # ns1 should not have seen packets from ns2, due to masquerade
++ expect="packets 0 bytes 0"
++ for dir in "in6" "out6" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns0$dir "$expect"
++ lret=1
++ fi
++
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++
++ ip netns exec ns0 nft flush chain ip6 nat postrouting
++ if [ $? -ne 0 ]; then
++ echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
++ lret=1
++ fi
++
++ test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
++
++ return $lret
++}
++
++test_masquerade()
++{
++ local lret=0
++
++ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
++ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
++
++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
++ if [ $? -ne 0 ] ; then
++ echo "ERROR: canot ping ns1 from ns2"
++ lret=1
++ fi
++
++ expect="packets 1 bytes 84"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns2$dir "$expect"
++ lret=1
++ fi
++
++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++
++ reset_counters
++
++# add masquerading rule
++ip netns exec ns0 nft -f - <<EOF
++table ip nat {
++ chain postrouting {
++ type nat hook postrouting priority 0; policy accept;
++ meta oif veth0 masquerade
++ }
++}
++EOF
++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
++ if [ $? -ne 0 ] ; then
++ echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
++ lret=1
++ fi
++
++ # ns1 should have seen packets from ns0, due to masquerade
++ expect="packets 1 bytes 84"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns0$dir "$expect"
++ lret=1
++ fi
++
++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++
++ # ns1 should not have seen packets from ns2, due to masquerade
++ expect="packets 0 bytes 0"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns0$dir "$expect"
++ lret=1
++ fi
++
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++
++ ip netns exec ns0 nft flush chain ip nat postrouting
++ if [ $? -ne 0 ]; then
++ echo "ERROR: Could not flush nat postrouting" 1>&2
++ lret=1
++ fi
++
++ test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
++
++ return $lret
++}
++
++test_redirect6()
++{
++ local lret=0
++
++ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
++
++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
++ if [ $? -ne 0 ] ; then
++ echo "ERROR: cannnot ping ns1 from ns2 via ipv6"
++ lret=1
++ fi
++
++ expect="packets 1 bytes 104"
++ for dir in "in6" "out6" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns2$dir "$expect"
++ lret=1
++ fi
++
++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++
++ reset_counters
++
++# add redirect rule
++ip netns exec ns0 nft -f - <<EOF
++table ip6 nat {
++ chain prerouting {
++ type nat hook prerouting priority 0; policy accept;
++ meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
++ }
++}
++EOF
++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
++ if [ $? -ne 0 ] ; then
++ echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
++ lret=1
++ fi
++
++ # ns1 should have seen no packets from ns2, due to redirection
++ expect="packets 0 bytes 0"
++ for dir in "in6" "out6" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns0$dir "$expect"
++ lret=1
++ fi
++ done
++
++ # ns0 should have seen packets from ns2, due to redirection
++ expect="packets 1 bytes 104"
++ for dir in "in6" "out6" ; do
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns0$dir "$expect"
++ lret=1
++ fi
++ done
++
++ ip netns exec ns0 nft delete table ip6 nat
++ if [ $? -ne 0 ]; then
++ echo "ERROR: Could not delete ip6 nat table" 1>&2
++ lret=1
++ fi
++
++ test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
++
++ return $lret
++}
++
++test_redirect()
++{
++ local lret=0
++
++ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
++ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
++
++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
++ if [ $? -ne 0 ] ; then
++ echo "ERROR: cannot ping ns1 from ns2"
++ lret=1
++ fi
++
++ expect="packets 1 bytes 84"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns2$dir "$expect"
++ lret=1
++ fi
++
++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns2 ns1$dir "$expect"
++ lret=1
++ fi
++ done
++
++ reset_counters
++
++# add redirect rule
++ip netns exec ns0 nft -f - <<EOF
++table ip nat {
++ chain prerouting {
++ type nat hook prerouting priority 0; policy accept;
++ meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
++ }
++}
++EOF
++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
++ if [ $? -ne 0 ] ; then
++ echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
++ lret=1
++ fi
++
++ # ns1 should have seen no packets from ns2, due to redirection
++ expect="packets 0 bytes 0"
++ for dir in "in" "out" ; do
++
++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns0$dir "$expect"
++ lret=1
++ fi
++ done
++
++ # ns0 should have seen packets from ns2, due to redirection
++ expect="packets 1 bytes 84"
++ for dir in "in" "out" ; do
++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
++ if [ $? -ne 0 ]; then
++ bad_counter ns1 ns0$dir "$expect"
++ lret=1
++ fi
++ done
++
++ ip netns exec ns0 nft delete table ip nat
++ if [ $? -ne 0 ]; then
++ echo "ERROR: Could not delete nat table" 1>&2
++ lret=1
++ fi
++
++ test $lret -eq 0 && echo "PASS: IP redirection for ns2"
++
++ return $lret
++}
++
++
++# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
++for i in 0 1 2; do
++ip netns exec ns$i nft -f - <<EOF
++table inet filter {
++ counter ns0in {}
++ counter ns1in {}
++ counter ns2in {}
++
++ counter ns0out {}
++ counter ns1out {}
++ counter ns2out {}
++
++ counter ns0in6 {}
++ counter ns1in6 {}
++ counter ns2in6 {}
++
++ counter ns0out6 {}
++ counter ns1out6 {}
++ counter ns2out6 {}
++
++ map nsincounter {
++ type ipv4_addr : counter
++ elements = { 10.0.1.1 : "ns0in",
++ 10.0.2.1 : "ns0in",
++ 10.0.1.99 : "ns1in",
++ 10.0.2.99 : "ns2in" }
++ }
++
++ map nsincounter6 {
++ type ipv6_addr : counter
++ elements = { dead:1::1 : "ns0in6",
++ dead:2::1 : "ns0in6",
++ dead:1::99 : "ns1in6",
++ dead:2::99 : "ns2in6" }
++ }
++
++ map nsoutcounter {
++ type ipv4_addr : counter
++ elements = { 10.0.1.1 : "ns0out",
++ 10.0.2.1 : "ns0out",
++ 10.0.1.99: "ns1out",
++ 10.0.2.99: "ns2out" }
++ }
++
++ map nsoutcounter6 {
++ type ipv6_addr : counter
++ elements = { dead:1::1 : "ns0out6",
++ dead:2::1 : "ns0out6",
++ dead:1::99 : "ns1out6",
++ dead:2::99 : "ns2out6" }
++ }
++
++ chain input {
++ type filter hook input priority 0; policy accept;
++ counter name ip saddr map @nsincounter
++ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
++ }
++ chain output {
++ type filter hook output priority 0; policy accept;
++ counter name ip daddr map @nsoutcounter
++ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
++ }
++}
++EOF
++done
++
++sleep 3
++# test basic connectivity
++for i in 1 2; do
++ ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
++ if [ $? -ne 0 ];then
++ echo "ERROR: Could not reach other namespace(s)" 1>&2
++ ret=1
++ fi
++
++ ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
++ if [ $? -ne 0 ];then
++ echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
++ ret=1
++ fi
++ check_counters ns$i
++ if [ $? -ne 0 ]; then
++ ret=1
++ fi
++
++ check_ns0_counters ns$i
++ if [ $? -ne 0 ]; then
++ ret=1
++ fi
++ reset_counters
++done
++
++if [ $ret -eq 0 ];then
++ echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
++fi
++
++reset_counters
++test_local_dnat
++test_local_dnat6
++
++reset_counters
++test_masquerade
++test_masquerade6
++
++reset_counters
++test_redirect
++test_redirect6
++
++for i in 0 1 2; do ip netns del ns$i;done
++
++exit $ret
+diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
+index 82121a81681f..29bac5ef9a93 100644
+--- a/tools/testing/selftests/proc/.gitignore
++++ b/tools/testing/selftests/proc/.gitignore
+@@ -10,4 +10,5 @@
+ /proc-uptime-002
+ /read
+ /self
++/setns-dcache
+ /thread-self
+diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
+index 1c12c34cf85d..434d033ee067 100644
+--- a/tools/testing/selftests/proc/Makefile
++++ b/tools/testing/selftests/proc/Makefile
+@@ -14,6 +14,7 @@ TEST_GEN_PROGS += proc-uptime-001
+ TEST_GEN_PROGS += proc-uptime-002
+ TEST_GEN_PROGS += read
+ TEST_GEN_PROGS += self
++TEST_GEN_PROGS += setns-dcache
+ TEST_GEN_PROGS += thread-self
+
+ include ../lib.mk
+diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
+new file mode 100644
+index 000000000000..60ab197a73fc
+--- /dev/null
++++ b/tools/testing/selftests/proc/setns-dcache.c
+@@ -0,0 +1,129 @@
++/*
++ * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com>
++ *
++ * Permission to use, copy, modify, and distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++/*
++ * Test that setns(CLONE_NEWNET) points to new /proc/net content even
++ * if old one is in dcache.
++ *
++ * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
++ */
++#undef NDEBUG
++#include <assert.h>
++#include <errno.h>
++#include <sched.h>
++#include <signal.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <unistd.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#include <sys/socket.h>
++
++static pid_t pid = -1;
++
++static void f(void)
++{
++ if (pid > 0) {
++ kill(pid, SIGTERM);
++ }
++}
++
++int main(void)
++{
++ int fd[2];
++ char _ = 0;
++ int nsfd;
++
++ atexit(f);
++
++ /* Check for privileges and syscall availability straight away. */
++ if (unshare(CLONE_NEWNET) == -1) {
++ if (errno == ENOSYS || errno == EPERM) {
++ return 4;
++ }
++ return 1;
++ }
++ /* Distinguisher between two otherwise empty net namespaces. */
++ if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
++ return 1;
++ }
++
++ if (pipe(fd) == -1) {
++ return 1;
++ }
++
++ pid = fork();
++ if (pid == -1) {
++ return 1;
++ }
++
++ if (pid == 0) {
++ if (unshare(CLONE_NEWNET) == -1) {
++ return 1;
++ }
++
++ if (write(fd[1], &_, 1) != 1) {
++ return 1;
++ }
++
++ pause();
++
++ return 0;
++ }
++
++ if (read(fd[0], &_, 1) != 1) {
++ return 1;
++ }
++
++ {
++ char buf[64];
++ snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
++ nsfd = open(buf, O_RDONLY);
++ if (nsfd == -1) {
++ return 1;
++ }
++ }
++
++ /* Reliably pin dentry into dcache. */
++ (void)open("/proc/net/unix", O_RDONLY);
++
++ if (setns(nsfd, CLONE_NEWNET) == -1) {
++ return 1;
++ }
++
++ kill(pid, SIGTERM);
++ pid = 0;
++
++ {
++ char buf[4096];
++ ssize_t rv;
++ int fd;
++
++ fd = open("/proc/net/unix", O_RDONLY);
++ if (fd == -1) {
++ return 1;
++ }
++
++#define S "Num RefCount Protocol Flags Type St Inode Path\n"
++ rv = read(fd, buf, sizeof(buf));
++
++ assert(rv == strlen(S));
++ assert(memcmp(buf, S, strlen(S)) == 0);
++ }
++
++ return 0;
++}
+diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
+index c02683cfb6c9..7656c7ce79d9 100644
+--- a/tools/testing/selftests/timers/Makefile
++++ b/tools/testing/selftests/timers/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ CFLAGS += -O3 -Wl,-no-as-needed -Wall
+-LDFLAGS += -lrt -lpthread -lm
++LDLIBS += -lrt -lpthread -lm
+
+ # these are all "safe" tests that don't modify
+ # system time or require escalated privileges