author    Mike Pagano <mpagano@gentoo.org>    2019-06-22 15:08:46 -0400
committer Mike Pagano <mpagano@gentoo.org>    2019-06-22 15:08:46 -0400
commit    da75bd9250a2a1f5c4df3d9eb65f2ae59c6c2774 (patch)
tree      e1c3c63002ff64483d4de809da9976787fcb1c39
parent    Linux patch 4.14.128 (diff)
download  linux-patches-4.14-137.tar.gz
          linux-patches-4.14-137.tar.bz2
          linux-patches-4.14-137.zip
Linux patch 4.14.129 (4.14-137)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README               |    4
-rw-r--r--  1128_linux-4.14.129.patch | 1201
2 files changed, 1205 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 2a274702..dc6e2d8b 100644
--- a/0000_README
+++ b/0000_README
@@ -555,6 +555,10 @@ Patch: 1127_linux-4.14.128.patch
From: https://www.kernel.org
Desc: Linux 4.14.128
+Patch: 1128_linux-4.14.129.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.129
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1128_linux-4.14.129.patch b/1128_linux-4.14.129.patch
new file mode 100644
index 00000000..6ca5c295
--- /dev/null
+++ b/1128_linux-4.14.129.patch
@@ -0,0 +1,1201 @@
+diff --git a/Makefile b/Makefile
+index ccdac2828cf7..2a55dd5bee0a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 128
++SUBLEVEL = 129
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
+index aa19b7ac8222..476c7b4be378 100644
+--- a/arch/ia64/mm/numa.c
++++ b/arch/ia64/mm/numa.c
+@@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)
+
+ return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
+ }
++EXPORT_SYMBOL(paddr_to_nid);
+
+ #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
+ /*
+diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
+index e3ba58f64c3d..5070b34b12fd 100644
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -296,6 +296,7 @@ struct kvm_arch {
+ #ifdef CONFIG_PPC_BOOK3S_64
+ struct list_head spapr_tce_tables;
+ struct list_head rtas_tokens;
++ struct mutex rtas_token_lock;
+ DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
+ #endif
+ #ifdef CONFIG_KVM_MPIC
+diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
+index 72d977e30952..d38280b01ef0 100644
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -836,6 +836,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
+ #ifdef CONFIG_PPC64
+ INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
+ INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
++ mutex_init(&kvm->arch.rtas_token_lock);
+ #endif
+
+ return kvm->arch.kvm_ops->init_vm(kvm);
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 58746328b9bd..3b7488fce3db 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -392,12 +392,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
+
+ static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
+ {
+- struct kvm_vcpu *ret;
+-
+- mutex_lock(&kvm->lock);
+- ret = kvm_get_vcpu_by_id(kvm, id);
+- mutex_unlock(&kvm->lock);
+- return ret;
++ return kvm_get_vcpu_by_id(kvm, id);
+ }
+
+ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
+@@ -1258,7 +1253,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 mask;
+
+- mutex_lock(&kvm->lock);
+ spin_lock(&vc->lock);
+ /*
+ * If ILE (interrupt little-endian) has changed, update the
+@@ -1298,7 +1292,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
+ mask &= 0xFFFFFFFF;
+ vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
+ spin_unlock(&vc->lock);
+- mutex_unlock(&kvm->lock);
+ }
+
+ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
+index 2d3b2b1cc272..8f2355138f80 100644
+--- a/arch/powerpc/kvm/book3s_rtas.c
++++ b/arch/powerpc/kvm/book3s_rtas.c
+@@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
+ {
+ struct rtas_token_definition *d, *tmp;
+
+- lockdep_assert_held(&kvm->lock);
++ lockdep_assert_held(&kvm->arch.rtas_token_lock);
+
+ list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
+ if (rtas_name_matches(d->handler->name, name)) {
+@@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
+ bool found;
+ int i;
+
+- lockdep_assert_held(&kvm->lock);
++ lockdep_assert_held(&kvm->arch.rtas_token_lock);
+
+ list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
+ if (d->token == token)
+@@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+- mutex_lock(&kvm->lock);
++ mutex_lock(&kvm->arch.rtas_token_lock);
+
+ if (args.token)
+ rc = rtas_token_define(kvm, args.name, args.token);
+ else
+ rc = rtas_token_undefine(kvm, args.name);
+
+- mutex_unlock(&kvm->lock);
++ mutex_unlock(&kvm->arch.rtas_token_lock);
+
+ return rc;
+ }
+@@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
+ orig_rets = args.rets;
+ args.rets = &args.args[be32_to_cpu(args.nargs)];
+
+- mutex_lock(&vcpu->kvm->lock);
++ mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
+
+ rc = -ENOENT;
+ list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
+@@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
+ }
+ }
+
+- mutex_unlock(&vcpu->kvm->lock);
++ mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
+
+ if (rc == 0) {
+ args.rets = orig_rets;
+@@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
+ {
+ struct rtas_token_definition *d, *tmp;
+
+- lockdep_assert_held(&kvm->lock);
+-
+ list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
+ list_del(&d->list);
+ kfree(d);
+diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
+index 6914b289c86b..7b93191dc2e3 100644
+--- a/arch/powerpc/platforms/powernv/opal-imc.c
++++ b/arch/powerpc/platforms/powernv/opal-imc.c
+@@ -87,6 +87,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+ struct imc_pmu *pmu_ptr;
+ u32 offset;
+
++ /* Return for unknown domain */
++ if (domain < 0)
++ return -EINVAL;
++
+ /* memory for pmu */
+ pmu_ptr = kzalloc(sizeof(struct imc_pmu), GFP_KERNEL);
+ if (!pmu_ptr)
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 25386be0d757..3310f9f6c3e1 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -681,7 +681,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -690,7 +690,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
+ EVENT_CONSTRAINT_END
+@@ -698,7 +698,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
+
+ struct event_constraint intel_slm_pebs_event_constraints[] = {
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
+ EVENT_CONSTRAINT_END
+@@ -729,7 +729,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -746,7 +746,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -755,7 +755,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
+ INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
+ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+@@ -770,9 +770,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
+ INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
+ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+@@ -786,9 +786,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
+@@ -809,9 +809,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
+@@ -832,9 +832,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
+ struct event_constraint intel_skl_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
+ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
+ /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
+ INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index ecf82859f1c0..bbebcd7a781e 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -792,8 +792,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
+ {
+ set_cpu_cap(c, X86_FEATURE_ZEN);
+
+- /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
+- if (!cpu_has(c, X86_FEATURE_CPB))
++ /*
++ * Fix erratum 1076: CPB feature bit not being set in CPUID.
++ * Always set it, except when running under a hypervisor.
++ */
++ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
+ set_cpu_cap(c, X86_FEATURE_CPB);
+ }
+
+diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
+index 53e71d0503ec..82e4d5cccf84 100644
+--- a/drivers/clk/ti/clkctrl.c
++++ b/drivers/clk/ti/clkctrl.c
+@@ -124,9 +124,6 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
+ int ret;
+ union omap4_timeout timeout = { 0 };
+
+- if (!clk->enable_bit)
+- return 0;
+-
+ if (clk->clkdm) {
+ ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+ if (ret) {
+@@ -138,6 +135,9 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
+ }
+ }
+
++ if (!clk->enable_bit)
++ return 0;
++
+ val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+ val &= ~OMAP4_MODULEMODE_MASK;
+@@ -166,7 +166,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
+ union omap4_timeout timeout = { 0 };
+
+ if (!clk->enable_bit)
+- return;
++ goto exit;
+
+ val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 3f80f167ed56..2357d2f73c1a 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -712,6 +712,7 @@ config GPIO_ADP5588
+ config GPIO_ADP5588_IRQ
+ bool "Interrupt controller support for ADP5588"
+ depends on GPIO_ADP5588=y
++ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to enable the adp5588 to be used as an interrupt
+ controller. It requires the driver to be built in the kernel.
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 22e80685cd52..b42bb955f22d 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1225,13 +1225,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ /* Add back in missing bits of ID for non-USI pens */
+ wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
+ }
+- wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));
+
+ for (i = 0; i < pen_frames; i++) {
+ unsigned char *frame = &data[i*pen_frame_len + 1];
+ bool valid = frame[0] & 0x80;
+ bool prox = frame[0] & 0x40;
+ bool range = frame[0] & 0x20;
++ bool invert = frame[0] & 0x10;
+
+ if (!valid)
+ continue;
+@@ -1240,8 +1240,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ wacom->shared->stylus_in_proximity = false;
+ wacom_exit_report(wacom);
+ input_sync(pen_input);
++
++ wacom->tool[0] = 0;
++ wacom->id[0] = 0;
++ wacom->serial[0] = 0;
+ return;
+ }
++
+ if (range) {
+ /* Fix rotation alignment: userspace expects zero at left */
+ int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
+@@ -1249,6 +1254,16 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ if (rotation > 899)
+ rotation -= 1800;
+
++ if (!wacom->tool[0]) { /* first in range */
++ /* Going into range select tool */
++ if (invert)
++ wacom->tool[0] = BTN_TOOL_RUBBER;
++ else if (wacom->id[0])
++ wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]);
++ else
++ wacom->tool[0] = BTN_TOOL_PEN;
++ }
++
+ input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
+ input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
+ input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]);
+@@ -1256,17 +1271,19 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ input_report_abs(pen_input, ABS_Z, rotation);
+ input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
+ }
+- input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+- input_report_abs(pen_input, ABS_DISTANCE, range ? frame[13] : wacom->features.distance_max);
+-
+- input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
+- input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
+- input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+-
+- input_report_key(pen_input, wacom->tool[0], prox);
+- input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
+- input_report_abs(pen_input, ABS_MISC,
+- wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
++ if (wacom->tool[0]) {
++ input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
++ input_report_abs(pen_input, ABS_DISTANCE, range ? frame[13] : wacom->features.distance_max);
++
++ input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09);
++ input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
++ input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
++
++ input_report_key(pen_input, wacom->tool[0], prox);
++ input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
++ input_report_abs(pen_input, ABS_MISC,
++ wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
++ }
+
+ wacom->shared->stylus_in_proximity = prox;
+
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index 00e8e675cbeb..eaa312bc3a3c 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -297,6 +297,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
+ rdwr_pa[i].buf[0] < 1 ||
+ rdwr_pa[i].len < rdwr_pa[i].buf[0] +
+ I2C_SMBUS_BLOCK_MAX) {
++ i++;
+ res = -EINVAL;
+ break;
+ }
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index e2beb182d54c..0299c0642de8 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -1197,6 +1197,8 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+ * mlx4_ib_vma_close().
+ */
+ down_write(&owning_mm->mmap_sem);
++ if (!mmget_still_valid(owning_mm))
++ goto skip_mm;
+ for (i = 0; i < HW_BAR_COUNT; i++) {
+ vma = context->hw_bar_info[i].vma;
+ if (!vma)
+@@ -1215,7 +1217,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+ /* context going to be destroyed, should not access ops any more */
+ context->hw_bar_info[i].vma->vm_ops = NULL;
+ }
+-
++skip_mm:
+ up_write(&owning_mm->mmap_sem);
+ mmput(owning_mm);
+ put_task_struct(owning_process);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 13a92062e9ca..3fbe3962d53b 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1646,6 +1646,8 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+ * mlx5_ib_vma_close.
+ */
+ down_write(&owning_mm->mmap_sem);
++ if (!mmget_still_valid(owning_mm))
++ goto skip_mm;
+ mutex_lock(&context->vma_private_list_mutex);
+ list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
+ list) {
+@@ -1662,6 +1664,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+ kfree(vma_private);
+ }
+ mutex_unlock(&context->vma_private_list_mutex);
++skip_mm:
+ up_write(&owning_mm->mmap_sem);
+ mmput(owning_mm);
+ put_task_struct(owning_process);
+diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
+index 65cb4aac8dce..477e07036add 100644
+--- a/drivers/isdn/mISDN/socket.c
++++ b/drivers/isdn/mISDN/socket.c
+@@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ memcpy(di.channelmap, dev->channelmap,
+ sizeof(di.channelmap));
+ di.nrbchan = dev->nrbchan;
+- strcpy(di.name, dev_name(&dev->dev));
++ strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
+ if (copy_to_user((void __user *)arg, &di, sizeof(di)))
+ err = -EFAULT;
+ } else
+@@ -678,7 +678,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ memcpy(di.channelmap, dev->channelmap,
+ sizeof(di.channelmap));
+ di.nrbchan = dev->nrbchan;
+- strcpy(di.name, dev_name(&dev->dev));
++ strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
+ if (copy_to_user((void __user *)arg, &di, sizeof(di)))
+ err = -EFAULT;
+ } else
+@@ -692,6 +692,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ err = -EFAULT;
+ break;
+ }
++ dn.name[sizeof(dn.name) - 1] = '\0';
+ dev = get_mdevice(dn.id);
+ if (dev)
+ err = device_rename(&dev->dev, dn.name);
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+index f4b3554b0b67..236325f48ec9 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+@@ -683,38 +683,41 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
+ if (is_err || rxd_wb->type & 0x1000U) {
+ /* status error or DMA error */
+ buff->is_error = 1U;
+- } else {
+- if (self->aq_nic_cfg->is_rss) {
+- /* last 4 byte */
+- u16 rss_type = rxd_wb->type & 0xFU;
+-
+- if (rss_type && rss_type < 0x8U) {
+- buff->is_hash_l4 = (rss_type == 0x4 ||
+- rss_type == 0x5);
+- buff->rss_hash = rxd_wb->rss_hash;
+- }
++ }
++ if (self->aq_nic_cfg->is_rss) {
++ /* last 4 byte */
++ u16 rss_type = rxd_wb->type & 0xFU;
++
++ if (rss_type && rss_type < 0x8U) {
++ buff->is_hash_l4 = (rss_type == 0x4 ||
++ rss_type == 0x5);
++ buff->rss_hash = rxd_wb->rss_hash;
+ }
++ }
+
+- if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+- buff->len = rxd_wb->pkt_len %
+- AQ_CFG_RX_FRAME_MAX;
+- buff->len = buff->len ?
+- buff->len : AQ_CFG_RX_FRAME_MAX;
+- buff->next = 0U;
+- buff->is_eop = 1U;
++ if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
++ buff->len = rxd_wb->pkt_len %
++ AQ_CFG_RX_FRAME_MAX;
++ buff->len = buff->len ?
++ buff->len : AQ_CFG_RX_FRAME_MAX;
++ buff->next = 0U;
++ buff->is_eop = 1U;
++ } else {
++ buff->len =
++ rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
++ AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
++
++ if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
++ rxd_wb->status) {
++ /* LRO */
++ buff->next = rxd_wb->next_desc_ptr;
++ ++ring->stats.rx.lro_packets;
+ } else {
+- if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
+- rxd_wb->status) {
+- /* LRO */
+- buff->next = rxd_wb->next_desc_ptr;
+- ++ring->stats.rx.lro_packets;
+- } else {
+- /* jumbo */
+- buff->next =
+- aq_ring_next_dx(ring,
+- ring->hw_head);
+- ++ring->stats.rx.jumbo_packets;
+- }
++ /* jumbo */
++ buff->next =
++ aq_ring_next_dx(ring,
++ ring->hw_head);
++ ++ring->stats.rx.jumbo_packets;
+ }
+ }
+ }
+diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
+index 0affee9c8aa2..0b1e7a96ff49 100644
+--- a/drivers/net/ethernet/dec/tulip/de4x5.c
++++ b/drivers/net/ethernet/dec/tulip/de4x5.c
+@@ -2108,7 +2108,6 @@ static struct eisa_driver de4x5_eisa_driver = {
+ .remove = de4x5_eisa_remove,
+ }
+ };
+-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
+ #endif
+
+ #ifdef CONFIG_PCI
+diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+index 7d1819c9e8cc..6ce7b8435ace 100644
+--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+@@ -1103,7 +1103,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
+ break;
+ case ETHTOOL_GRXRINGS:
+- cmd->data = adapter->num_rx_qs - 1;
++ cmd->data = adapter->num_rx_qs;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index ab09f9e43c79..5c74787f903b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2505,6 +2505,10 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
+ mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+
+ autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
++ if (!autoneg && cmd->base.speed == SPEED_56000) {
++ netdev_err(dev, "56G not supported with autoneg off\n");
++ return -EINVAL;
++ }
+ eth_proto_new = autoneg ?
+ mlxsw_sp_to_ptys_advert_link(cmd) :
+ mlxsw_sp_to_ptys_speed(cmd->base.speed);
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index abfb9faadbc4..9b1906a65e11 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1458,6 +1458,10 @@ static void sh_eth_dev_exit(struct net_device *ndev)
+ sh_eth_get_stats(ndev);
+ sh_eth_reset(ndev);
+
++ /* Set the RMII mode again if required */
++ if (mdp->cd->rmiimode)
++ sh_eth_write(ndev, 0x1, RMIIMODE);
++
+ /* Set MAC address again */
+ update_mac_address(ndev);
+ }
+diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
+index c1ab976cc800..12b09e6e03ba 100644
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -249,10 +249,8 @@ static int dp83867_config_init(struct phy_device *phydev)
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
+ if (ret)
+ return ret;
+- }
+
+- if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
+- (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
++ /* Set up RGMII delays */
+ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
+index 512c8f1ea5b0..902f5e03ec94 100644
+--- a/drivers/scsi/cxgbi/libcxgbi.c
++++ b/drivers/scsi/cxgbi/libcxgbi.c
+@@ -640,6 +640,10 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
+
+ if (ndev->flags & IFF_LOOPBACK) {
+ ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
++ if (!ndev) {
++ err = -ENETUNREACH;
++ goto rel_neigh;
++ }
+ mtu = ndev->mtu;
+ pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
+ n->dev->name, ndev->name, mtu);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 0962fd544401..09c6a16fab93 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -1151,10 +1151,8 @@ static int __init alua_init(void)
+ int r;
+
+ kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
+- if (!kaluad_wq) {
+- /* Temporary failure, bypass */
+- return SCSI_DH_DEV_TEMP_BUSY;
+- }
++ if (!kaluad_wq)
++ return -ENOMEM;
+
+ r = scsi_register_device_handler(&alua_dh);
+ if (r != 0) {
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index ffea620a147d..259ee0d3c3e6 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -989,6 +989,8 @@ static struct domain_device *sas_ex_discover_expander(
+ list_del(&child->dev_list_node);
+ spin_unlock_irq(&parent->port->dev_list_lock);
+ sas_put_device(child);
++ sas_port_delete(phy->port);
++ phy->port = NULL;
+ return NULL;
+ }
+ list_add_tail(&child->siblings, &parent->ex_dev.children);
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 5ec2898d21cd..d34351c6b9af 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -6392,7 +6392,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
+ else
+ mask = DMA_BIT_MASK(32);
+
+- rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
++ rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
+ goto disable_device;
+diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
+index 77a5d6f4e1eb..8a242f609d3b 100644
+--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
++++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
+@@ -579,7 +579,7 @@ exit:
+ dev->colourfx.enable ? "true" : "false",
+ dev->colourfx.u, dev->colourfx.v,
+ ret, (ret == 0 ? 0 : -EINVAL));
+- return (ret == 0 ? 0 : EINVAL);
++ return (ret == 0 ? 0 : -EINVAL);
+ }
+
+ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
+@@ -603,7 +603,7 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
+ "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
+ __func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
+ (ret == 0 ? 0 : -EINVAL));
+- return (ret == 0 ? 0 : EINVAL);
++ return (ret == 0 ? 0 : -EINVAL);
+ }
+
+ static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
+diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
+index 46e46894e918..84c4d83fcfca 100644
+--- a/drivers/tty/serial/sunhv.c
++++ b/drivers/tty/serial/sunhv.c
+@@ -396,7 +396,7 @@ static const struct uart_ops sunhv_pops = {
+ static struct uart_driver sunhv_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "sunhv",
+- .dev_name = "ttyS",
++ .dev_name = "ttyHV",
+ .major = TTY_MAJOR,
+ };
+
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index d7955dc56737..a1985a9ad2d6 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry,
+ if (sd) {
+ /* Coordinate with configfs_readdir */
+ spin_lock(&configfs_dirent_lock);
+- /* Coordinate with configfs_attach_attr where will increase
+- * sd->s_count and update sd->s_dentry to new allocated one.
+- * Only set sd->dentry to null when this dentry is the only
+- * sd owner.
+- * If not do so, configfs_d_iput may run just after
+- * configfs_attach_attr and set sd->s_dentry to null
+- * even it's still in use.
++ /*
++ * Set sd->s_dentry to null only when this dentry is the one
++ * that is going to be killed. Otherwise configfs_d_iput may
++ * run just after configfs_attach_attr and set sd->s_dentry to
++ * NULL even it's still in use.
+ */
+- if (atomic_read(&sd->s_count) <= 2)
++ if (sd->s_dentry == dentry)
+ sd->s_dentry = NULL;
+
+ spin_unlock(&configfs_dirent_lock);
+diff --git a/fs/inode.c b/fs/inode.c
+index cfc36d11bcb3..76f7535fe754 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1817,8 +1817,13 @@ int file_remove_privs(struct file *file)
+ int kill;
+ int error = 0;
+
+- /* Fast path for nothing security related */
+- if (IS_NOSEC(inode))
++ /*
++ * Fast path for nothing security related.
++ * As well for non-regular files, e.g. blkdev inodes.
++ * For example, blkdev_write_iter() might get here
++ * trying to remove privs which it is not allowed to.
++ */
++ if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
+ return 0;
+
+ kill = dentry_needs_remove_privs(dentry);
+diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
+index ef4ae0a545fe..9f7cc1d7ec4a 100644
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -62,6 +62,10 @@ static inline void mmdrop_async(struct mm_struct *mm)
+ * followed by taking the mmap_sem for writing before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
++ * It also has to be called when mmgrab() is used in the context of
++ * the process, but then the mm_count refcount is transferred outside
++ * the context of the process to run down_write() on that pinned mm.
++ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 489dc6b60053..f3a69a4f0d57 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
+ unsigned long head;
+
+ again:
++ /*
++ * In order to avoid publishing a head value that goes backwards,
++ * we must ensure the load of @rb->head happens after we've
++ * incremented @rb->nest.
++ *
++ * Otherwise we can observe a @rb->head value before one published
++ * by an IRQ/NMI happening between the load and the increment.
++ */
++ barrier();
+ head = local_read(&rb->head);
+
+ /*
+- * IRQ/NMI can happen here, which means we can miss a head update.
++ * IRQ/NMI can happen here and advance @rb->head, causing our
++ * load above to be stale.
+ */
+
+- if (!local_dec_and_test(&rb->nest))
++ /*
++ * If this isn't the outermost nesting, we don't have to update
++ * @rb->user_page->data_head.
++ */
++ if (local_read(&rb->nest) > 1) {
++ local_dec(&rb->nest);
+ goto out;
++ }
+
+ /*
+ * Since the mmap() consumer (userspace) can run on a different CPU:
+@@ -85,12 +101,21 @@ again:
+ * See perf_output_begin().
+ */
+ smp_wmb(); /* B, matches C */
+- rb->user_page->data_head = head;
++ WRITE_ONCE(rb->user_page->data_head, head);
++
++ /*
++ * We must publish the head before decrementing the nest count,
++ * otherwise an IRQ/NMI can publish a more recent head value and our
++ * write will (temporarily) publish a stale value.
++ */
++ barrier();
++ local_set(&rb->nest, 0);
+
+ /*
+- * Now check if we missed an update -- rely on previous implied
+- * compiler barriers to force a re-read.
++ * Ensure we decrement @rb->nest before we validate the @rb->head.
++ * Otherwise we cannot be sure we caught the 'last' nested update.
+ */
++ barrier();
+ if (unlikely(head != local_read(&rb->head))) {
+ local_inc(&rb->nest);
+ goto again;
+@@ -464,7 +489,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
+ handle->aux_flags);
+ }
+
+- rb->user_page->aux_head = rb->aux_head;
++ WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
+ if (rb_need_aux_wakeup(rb))
+ wakeup = true;
+
+@@ -495,7 +520,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
+
+ rb->aux_head += size;
+
+- rb->user_page->aux_head = rb->aux_head;
++ WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
+ if (rb_need_aux_wakeup(rb)) {
+ perf_output_wakeup(handle);
+ handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index d27a73737f1a..acf66c5dc9bd 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1006,6 +1006,9 @@ static void collapse_huge_page(struct mm_struct *mm,
+ * handled by the anon_vma lock + PG_lock.
+ */
+ down_write(&mm->mmap_sem);
++ result = SCAN_ANY_PROCESS;
++ if (!mmget_still_valid(mm))
++ goto out;
+ result = hugepage_vma_revalidate(mm, address, &vma);
+ if (result)
+ goto out;
+diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
+index 7f369f1db7ae..b8e1a5e6a9d3 100644
+--- a/net/ax25/ax25_route.c
++++ b/net/ax25/ax25_route.c
+@@ -443,9 +443,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
+ }
+
+ if (ax25->sk != NULL) {
++ local_bh_disable();
+ bh_lock_sock(ax25->sk);
+ sock_reset_flag(ax25->sk, SOCK_ZAPPED);
+ bh_unlock_sock(ax25->sk);
++ local_bh_enable();
+ }
+
+ put:
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index dd83a81db55f..04b0e399361a 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2743,6 +2743,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
+ }
+
+ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
++ __acquires(tbl->lock)
+ __acquires(rcu_bh)
+ {
+ struct neigh_seq_state *state = seq->private;
+@@ -2753,6 +2754,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
+
+ rcu_read_lock_bh();
+ state->nht = rcu_dereference_bh(tbl->nht);
++ read_lock(&tbl->lock);
+
+ return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
+ }
+@@ -2786,8 +2788,13 @@ out:
+ EXPORT_SYMBOL(neigh_seq_next);
+
+ void neigh_seq_stop(struct seq_file *seq, void *v)
++ __releases(tbl->lock)
+ __releases(rcu_bh)
+ {
++ struct neigh_seq_state *state = seq->private;
++ struct neigh_table *tbl = state->tbl;
++
++ read_unlock(&tbl->lock);
+ rcu_read_unlock_bh();
+ }
+ EXPORT_SYMBOL(neigh_seq_stop);
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index 6fa2bc236d9e..c7458a606e2e 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -254,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
+ rcu_read_lock_bh();
+ for_each_sk_fl_rcu(np, sfl) {
+ struct ip6_flowlabel *fl = sfl->fl;
+- if (fl->label == label) {
++
++ if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
+ fl->lastuse = jiffies;
+- atomic_inc(&fl->users);
+ rcu_read_unlock_bh();
+ return fl;
+ }
+@@ -623,7 +623,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
+ goto done;
+ }
+ fl1 = sfl->fl;
+- atomic_inc(&fl1->users);
++ if (!atomic_inc_not_zero(&fl1->users))
++ fl1 = NULL;
+ break;
+ }
+ }
+diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
+index e15314e3b464..299de0c0508e 100644
+--- a/net/lapb/lapb_iface.c
++++ b/net/lapb/lapb_iface.c
+@@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev)
+ lapb = __lapb_devtostruct(dev);
+ if (!lapb)
+ goto out;
++ lapb_put(lapb);
+
+ lapb_stop_t1timer(lapb);
+ lapb_stop_t2timer(lapb);
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index d1c0378144f3..ee97ce176b9a 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -2268,7 +2268,6 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
+ {
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+- nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ ip_vs_service_net_cleanup(ipvs); /* ip_vs_flush() with locks */
+ ip_vs_conn_net_cleanup(ipvs);
+ ip_vs_app_net_cleanup(ipvs);
+@@ -2283,6 +2282,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
+ {
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ EnterFunction(2);
++ nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ ipvs->enable = 0; /* Disable packet reception */
+ smp_wmb();
+ ip_vs_sync_net_cleanup(ipvs);
+diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
+index f7e21953b1de..8260b1e73bbd 100644
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -193,6 +193,7 @@ static unsigned int nf_iterate(struct sk_buff *skb,
+ repeat:
+ verdict = nf_hook_entry_hookfn(hook, skb, state);
+ if (verdict != NF_ACCEPT) {
++ *index = i;
+ if (verdict != NF_REPEAT)
+ return verdict;
+ goto repeat;
+diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
+index 04a3128adcf0..b9377afeaba4 100644
+--- a/net/openvswitch/vport-internal_dev.c
++++ b/net/openvswitch/vport-internal_dev.c
+@@ -176,7 +176,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
+ {
+ struct vport *vport;
+ struct internal_dev *internal_dev;
++ struct net_device *dev;
+ int err;
++ bool free_vport = true;
+
+ vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
+ if (IS_ERR(vport)) {
+@@ -184,8 +186,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
+ goto error;
+ }
+
+- vport->dev = alloc_netdev(sizeof(struct internal_dev),
+- parms->name, NET_NAME_USER, do_setup);
++ dev = alloc_netdev(sizeof(struct internal_dev),
++ parms->name, NET_NAME_USER, do_setup);
++ vport->dev = dev;
+ if (!vport->dev) {
+ err = -ENOMEM;
+ goto error_free_vport;
+@@ -207,8 +210,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
+
+ rtnl_lock();
+ err = register_netdevice(vport->dev);
+- if (err)
++ if (err) {
++ free_vport = false;
+ goto error_unlock;
++ }
+
+ dev_set_promiscuity(vport->dev, 1);
+ rtnl_unlock();
+@@ -218,11 +223,12 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
+
+ error_unlock:
+ rtnl_unlock();
+- free_percpu(vport->dev->tstats);
++ free_percpu(dev->tstats);
+ error_free_netdev:
+- free_netdev(vport->dev);
++ free_netdev(dev);
+ error_free_vport:
+- ovs_vport_free(vport);
++ if (free_vport)
++ ovs_vport_free(vport);
+ error:
+ return ERR_PTR(err);
+ }
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 4edb4f5596b8..f67df16bd340 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -2586,6 +2586,8 @@ do_addr_param:
+ case SCTP_PARAM_STATE_COOKIE:
+ asoc->peer.cookie_len =
+ ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
++ if (asoc->peer.cookie)
++ kfree(asoc->peer.cookie);
+ asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
+ if (!asoc->peer.cookie)
+ retval = 0;
+@@ -2650,6 +2652,8 @@ do_addr_param:
+ goto fall_through;
+
+ /* Save peer's random parameter */
++ if (asoc->peer.peer_random)
++ kfree(asoc->peer.peer_random);
+ asoc->peer.peer_random = kmemdup(param.p,
+ ntohs(param.p->length), gfp);
+ if (!asoc->peer.peer_random) {
+@@ -2663,6 +2667,8 @@ do_addr_param:
+ goto fall_through;
+
+ /* Save peer's HMAC list */
++ if (asoc->peer.peer_hmacs)
++ kfree(asoc->peer.peer_hmacs);
+ asoc->peer.peer_hmacs = kmemdup(param.p,
+ ntohs(param.p->length), gfp);
+ if (!asoc->peer.peer_hmacs) {
+@@ -2678,6 +2684,8 @@ do_addr_param:
+ if (!ep->auth_enable)
+ goto fall_through;
+
++ if (asoc->peer.peer_chunks)
++ kfree(asoc->peer.peer_chunks);
+ asoc->peer.peer_chunks = kmemdup(param.p,
+ ntohs(param.p->length), gfp);
+ if (!asoc->peer.peer_chunks)
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 65fb1e7edb9c..d349f69ef03c 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -376,6 +376,7 @@ enum {
+
+ #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+ #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
++#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
+
+ static char *driver_short_names[] = {
+ [AZX_DRIVER_ICH] = "HDA Intel",
+@@ -1751,8 +1752,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
+ else
+ chip->bdl_pos_adj = bdl_pos_adj[dev];
+
+- /* Workaround for a communication error on CFL (bko#199007) */
+- if (IS_CFL(pci))
++ /* Workaround for a communication error on CFL (bko#199007) and CNL */
++ if (IS_CFL(pci) || IS_CNL(pci))
+ chip->polling_mode = 1;
+
+ err = azx_bus_init(chip, model[dev], &pci_hda_io_ops);
+diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
+index 0b2054007314..a19690a17291 100644
+--- a/tools/perf/arch/s390/util/machine.c
++++ b/tools/perf/arch/s390/util/machine.c
+@@ -5,16 +5,19 @@
+ #include "util.h"
+ #include "machine.h"
+ #include "api/fs/fs.h"
++#include "debug.h"
+
+ int arch__fix_module_text_start(u64 *start, const char *name)
+ {
++ u64 m_start = *start;
+ char path[PATH_MAX];
+
+ snprintf(path, PATH_MAX, "module/%.*s/sections/.text",
+ (int)strlen(name) - 2, name + 1);
+-
+- if (sysfs__read_ull(path, (unsigned long long *)start) < 0)
+- return -1;
++ if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
++ pr_debug2("Using module %s start:%#lx\n", path, m_start);
++ *start = m_start;
++ }
+
+ return 0;
+ }
+diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
+index 2346cecb8ea2..5131304ea3a8 100644
+--- a/tools/perf/util/data-convert-bt.c
++++ b/tools/perf/util/data-convert-bt.c
+@@ -271,7 +271,7 @@ static int string_set_value(struct bt_ctf_field *field, const char *string)
+ if (i > 0)
+ strncpy(buffer, string, i);
+ }
+- strncat(buffer + p, numstr, 4);
++ memcpy(buffer + p, numstr, 4);
+ p += 3;
+ }
+ }
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 968fd0454e6b..d246080cd85e 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -156,6 +156,9 @@ void machine__delete_threads(struct machine *machine)
+
+ void machine__exit(struct machine *machine)
+ {
++ if (machine == NULL)
++ return;
++
+ machine__destroy_kernel_maps(machine);
+ map_groups__exit(&machine->kmaps);
+ dsos__exit(&machine->dsos);
+diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
+index 8ec76681605c..f25f72a75cf3 100755
+--- a/tools/testing/selftests/netfilter/nft_nat.sh
++++ b/tools/testing/selftests/netfilter/nft_nat.sh
+@@ -23,7 +23,11 @@ ip netns add ns0
+ ip netns add ns1
+ ip netns add ns2
+
+-ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
++ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1
++if [ $? -ne 0 ];then
++ echo "SKIP: No virtual ethernet pair device support in kernel"
++ exit $ksft_skip
++fi
+ ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+
+ ip -net ns0 link set lo up