Merge tag 'perf-tools-fixes-2020-07-07' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
Pull perf tooling fixes from Arnaldo Carvalho de Melo:
- Intel PT fixes for PEBS-via-PT with registers
- Fixes for Intel PT python based GUI
- Avoid duplicated sideband events with Intel PT in system wide tracing
- Remove needless 'dummy' event from TUI menu, used when synthesizing
metadata events for pre-existing processes
- Fix corner case segfault when pressing enter in a screen without
entries in the TUI for report/top
- Fixes for time stamp handling in libtraceevent
- Explicitly set utf-8 encoding in perf flamegraph
- Update arch/x86/lib/memcpy_64.S copy used in 'perf bench mem memcpy',
silencing perf build warning
* tag 'perf-tools-fixes-2020-07-07' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
perf report TUI: Remove needless 'dummy' event from menu
perf intel-pt: Fix PEBS sample for XMM registers
perf intel-pt: Fix displaying PEBS-via-PT with registers
perf intel-pt: Fix recording PEBS-via-PT with registers
perf report TUI: Fix segmentation fault in perf_evsel__hists_browse()
tools lib traceevent: Add proper KBUFFER_TYPE_TIME_STAMP handling
tools lib traceevent: Add API to read time information from kbuffer
perf scripts python: exported-sql-viewer.py: Fix time chart call tree
perf scripts python: exported-sql-viewer.py: Fix zero id in call tree 'Find' result
perf scripts python: exported-sql-viewer.py: Fix zero id in call graph 'Find' result
perf scripts python: exported-sql-viewer.py: Fix unexpanded 'Find' result
perf record: Fix duplicated sideband events with Intel PT system wide tracing
perf scripts python: export-to-postgresql.py: Fix struct.pack() int argument
tools arch: Update arch/x86/lib/memcpy_64.S copy used in 'perf bench mem memcpy'
perf flamegraph: Explicitly set utf-8 encoding
diff --git a/.gitignore b/.gitignore
index 87b9dd8..d5f4804 100644
--- a/.gitignore
+++ b/.gitignore
@@ -143,6 +143,9 @@
/allrandom.config
/allyes.config
+# Kconfig savedefconfig output
+/defconfig
+
# Kdevelop4
*.kdev4
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 936cf2a..3f7c3a7 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -147,6 +147,14 @@
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 |
+----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1463225 |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1418040 |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1530923 |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1024718 |
++----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 |
+----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/i2c/slave-eeprom-backend.rst b/Documentation/i2c/slave-eeprom-backend.rst
index 0b8cd83..38d951f 100644
--- a/Documentation/i2c/slave-eeprom-backend.rst
+++ b/Documentation/i2c/slave-eeprom-backend.rst
@@ -1,14 +1,26 @@
==============================
-Linux I2C slave eeprom backend
+Linux I2C slave EEPROM backend
==============================
-by Wolfram Sang <[email protected]> in 2014-15
+by Wolfram Sang <[email protected]> in 2014-20
-This is a proof-of-concept backend which acts like an EEPROM on the connected
-I2C bus. The memory contents can be modified from userspace via this file
-located in sysfs::
+This backend simulates an EEPROM on the connected I2C bus. Its memory contents
+can be accessed from userspace via this file located in sysfs::
/sys/bus/i2c/devices/<device-directory>/slave-eeprom
+The following types are available: 24c02, 24c32, 24c64, and 24c512. Read-only
+variants are also supported. The name needed for instantiating has the form
+'slave-<type>[ro]'. Examples follow:
+
+24c02, read/write, address 0x64:
+ # echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-1/new_device
+
+24c512, read-only, address 0x42:
+ # echo slave-24c512ro 0x1042 > /sys/bus/i2c/devices/i2c-1/new_device
+
+You can also preload data during boot if a device-property named
+'firmware-name' contains a valid filename (DT or ACPI only).
+
As of 2015, Linux doesn't support poll on binary sysfs files, so there is no
notification when another master changed the content.
diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
index a45cccf..85ccc87 100644
--- a/Documentation/kbuild/modules.rst
+++ b/Documentation/kbuild/modules.rst
@@ -182,7 +182,8 @@
8123_pci.c
8123_bin.o_shipped <= Binary blob
---- 3.1 Shared Makefile
+3.1 Shared Makefile
+-------------------
An external module always includes a wrapper makefile that
supports building the module using "make" with no arguments.
@@ -470,9 +471,9 @@
The syntax of the Module.symvers file is::
- <CRC> <Symbol> <Module> <Export Type> <Namespace>
+ <CRC> <Symbol> <Module> <Export Type> <Namespace>
- 0xe1cc2a05 usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL USB_STORAGE
+ 0xe1cc2a05 usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL USB_STORAGE
The fields are separated by tabs and values may be empty (e.g.
if no namespace is defined for an exported symbol).
diff --git a/Documentation/kbuild/reproducible-builds.rst b/Documentation/kbuild/reproducible-builds.rst
index 5033938..3b25655 100644
--- a/Documentation/kbuild/reproducible-builds.rst
+++ b/Documentation/kbuild/reproducible-builds.rst
@@ -101,7 +101,7 @@
If you enable ``CONFIG_GCC_PLUGIN_RANDSTRUCT``, you will need to
pre-generate the random seed in
-``scripts/gcc-plgins/randomize_layout_seed.h`` so the same value
+``scripts/gcc-plugins/randomize_layout_seed.h`` so the same value
is used in rebuilds.
Debug info conflicts
diff --git a/Makefile b/Makefile
index a60c985..fe0164a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 8
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@@ -970,8 +970,8 @@
endif
# Align the bit size of userspace programs with the kernel
-KBUILD_USERCFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
-KBUILD_USERLDFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index fddc700..197896c 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -170,6 +170,15 @@
endchoice
+config ARC_TUNE_MCPU
+ string "Override default -mcpu compiler flag"
+ default ""
+ help
+ Override default -mcpu=xxx compiler flag (which is set depending on
+ the ISA version) with the specified value.
+ NOTE: If specified flag isn't supported by current compiler the
+ ISA default value will be used as a fallback.
+
config CPU_BIG_ENDIAN
bool "Enable Big Endian Mode"
help
@@ -465,6 +474,12 @@
This is programmable and can be optionally disabled in which case
software INTERRUPT_PROLOGUE/EPILGUE do the needed work
+config ARC_LPB_DISABLE
+ bool "Disable loop buffer (LPB)"
+ help
+ On HS cores, loop buffer (LPB) is programmable in runtime and can
+ be optionally disabled.
+
endif # ISA_ARCV2
endmenu # "ARC CPU Configuration"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 20e9ab6c..d00f8b8 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -10,8 +10,25 @@
endif
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
-cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38
+
+tune-mcpu-def-$(CONFIG_ISA_ARCOMPACT) := -mcpu=arc700
+tune-mcpu-def-$(CONFIG_ISA_ARCV2) := -mcpu=hs38
+
+ifeq ($(CONFIG_ARC_TUNE_MCPU),"")
+cflags-y += $(tune-mcpu-def-y)
+else
+tune-mcpu := $(shell echo $(CONFIG_ARC_TUNE_MCPU))
+tune-mcpu-ok := $(call cc-option-yn, $(tune-mcpu))
+ifeq ($(tune-mcpu-ok),y)
+cflags-y += $(tune-mcpu)
+else
+# The flag provided by 'CONFIG_ARC_TUNE_MCPU' option isn't known by this compiler
+# (probably the compiler is too old). Use ISA default mcpu flag instead as a safe option.
+$(warning ** WARNING ** CONFIG_ARC_TUNE_MCPU flag '$(tune-mcpu)' is unknown, fallback to '$(tune-mcpu-def-y)')
+cflags-y += $(tune-mcpu-def-y)
+endif
+endif
+
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register defintion, make sure it gets passed to every file
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index c77a0e3..0284ace 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -19,7 +19,7 @@
#define R_ARC_32_PCREL 0x31
/*to set parameters in the core dumps */
-#define ELF_ARCH EM_ARCOMPACT
+#define ELF_ARCH EM_ARC_INUSE
#define ELF_CLASS ELFCLASS32
#ifdef CONFIG_CPU_BIG_ENDIAN
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index 7fc73fe..863d63a 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -90,6 +90,9 @@ static inline void arch_local_irq_restore(unsigned long flags)
/*
* Unconditionally Enable IRQs
*/
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+extern void arch_local_irq_enable(void);
+#else
static inline void arch_local_irq_enable(void)
{
unsigned long temp;
@@ -102,7 +105,7 @@ static inline void arch_local_irq_enable(void)
: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
: "cc", "memory");
}
-
+#endif
/*
* Unconditionally Disable IRQs
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 60406ec..ea00c8a 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -165,7 +165,6 @@
tracesys:
; save EFA in case tracer wants the PC of traced task
; using ERET won't work since next-PC has already committed
- lr r12, [efa]
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
@@ -208,15 +207,9 @@
; Breakpoint TRAP
; ---------------------------------------------
trap_with_param:
-
- ; stop_pc info by gdb needs this info
- lr r0, [efa]
+ mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
mov r1, sp
- ; Now that we have read EFA, it is safe to do "fake" rtie
- ; and get out of CPU exception mode
- FAKE_RET_FROM_EXCPN
-
; Save callee regs in case gdb wants to have a look
; SP will grow up by size of CALLEE Reg-File
; NOTE: clobbers r12
@@ -243,6 +236,10 @@
EXCEPTION_PROLOGUE
+ lr r12, [efa]
+
+ FAKE_RET_FROM_EXCPN
+
;============ TRAP 1 :breakpoints
; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
bmsk.f 0, r10, 7
@@ -250,9 +247,6 @@
;============ TRAP (no param): syscall top level
- ; First return from Exception to pure K mode (Exception/IRQs renabled)
- FAKE_RET_FROM_EXCPN
-
; If syscall tracing ongoing, invoke pre-post-hooks
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 6eb23f1..17fd1ed 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -59,6 +59,14 @@
bclr r5, r5, STATUS_AD_BIT
#endif
kflag r5
+
+#ifdef CONFIG_ARC_LPB_DISABLE
+ lr r5, [ARC_REG_LPB_BUILD]
+ breq r5, 0, 1f ; LPB doesn't exist
+ mov r5, 1
+ sr r5, [ARC_REG_LPB_CTRL]
+1:
+#endif /* CONFIG_ARC_LPB_DISABLE */
#endif
; Config DSP_CTRL properly, so kernel may use integer multiply,
; multiply-accumulate, and divide operations
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index dad8a65..41f07b3 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -58,10 +58,12 @@ static const struct id_to_str arc_legacy_rel[] = {
{ 0x00, NULL }
};
-static const struct id_to_str arc_cpu_rel[] = {
+static const struct id_to_str arc_hs_ver54_rel[] = {
/* UARCH.MAJOR, Release */
{ 0, "R3.10a"},
{ 1, "R3.50a"},
+ { 2, "R3.60a"},
+ { 3, "R4.00a"},
{ 0xFF, NULL }
};
@@ -117,12 +119,6 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
struct bcr_uarch_build_arcv2 uarch;
const struct id_to_str *tbl;
- /*
- * Up until (including) the first core4 release (0x54) things were
- * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family
- * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
- */
-
if (cpu->core.family < 0x54) { /* includes arc700 */
for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
@@ -143,11 +139,10 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
}
/*
- * However the subsequent HS release (same 0x54) allow HS38 or HS48
- * configurations and encode this info in a different BCR.
- * The BCR was introduced in 0x54 so can't be read unconditionally.
+ * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
+ * ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent
+ * releases only update it.
*/
-
READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
if (uarch.prod == 4) {
@@ -158,7 +153,7 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
cpu->name = "HS38";
}
- for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
+ for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
if (uarch.maj == tbl->id) {
cpu->release = tbl->str;
break;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index fd4e1ce1..e93145d 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -241,7 +241,6 @@ static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
* see Documentation/devicetree/bindings/arm/xen.txt for the
* documentation of the Xen Device Tree format.
*/
-#define GRANT_TABLE_PHYSADDR 0
void __init xen_early_init(void)
{
of_scan_flat_dt(fdt_find_hyper_node, NULL);
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 5e5dc05..12f0eb5 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -73,11 +73,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature) \
".popsection\n" \
- ".pushsection .altinstr_replacement, \"a\"\n" \
+ ".subsection 1\n" \
"663:\n\t" \
newinstr "\n" \
"664:\n\t" \
- ".popsection\n\t" \
+ ".previous\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
".org . - (662b-661b) + (664b-663b)\n" \
".endif\n"
@@ -117,9 +117,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
662: .pushsection .altinstructions, "a"
altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
.popsection
- .pushsection .altinstr_replacement, "ax"
+ .subsection 1
663: \insn2
-664: .popsection
+664: .previous
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.endif
@@ -160,7 +160,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
.pushsection .altinstructions, "a"
altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
.popsection
- .pushsection .altinstr_replacement, "ax"
+ .subsection 1
.align 2 /* So GAS knows label 661 is suitably aligned */
661:
.endm
@@ -179,9 +179,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
.macro alternative_else
662:
.if .Lasm_alt_mode==0
- .pushsection .altinstr_replacement, "ax"
+ .subsection 1
.else
- .popsection
+ .previous
.endif
663:
.endm
@@ -192,7 +192,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
.macro alternative_endif
664:
.if .Lasm_alt_mode==0
- .popsection
+ .previous
.endif
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index a358e97..6647ae4 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
return read_sysreg_s(SYS_ICC_PMR_EL1);
}
-static inline void gic_write_pmr(u32 val)
+static __always_inline void gic_write_pmr(u32 val)
{
write_sysreg_s(val, SYS_ICC_PMR_EL1);
}
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 5d1f4ae..f7c3d1f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -675,7 +675,7 @@ static inline bool system_supports_generic_auth(void)
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}
-static inline bool system_uses_irq_prio_masking(void)
+static __always_inline bool system_uses_irq_prio_masking(void)
{
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index a87a93f..7219cdd 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -86,6 +86,7 @@
#define QCOM_CPU_PART_FALKOR 0xC00
#define QCOM_CPU_PART_KRYO 0x200
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
+#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
#define NVIDIA_CPU_PART_DENVER 0x003
@@ -114,6 +115,7 @@
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index cf50c53..8e302dc 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -472,12 +472,7 @@ static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
int scope)
{
- u32 midr = read_cpuid_id();
- /* Cortex-A76 r0p0 - r3p1 */
- struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
-
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+ return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
@@ -728,6 +723,8 @@ static const struct midr_range erratum_1418040_list[] = {
MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
/* Neoverse-N1 r0p0 to r3p1 */
MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
+ /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+ MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
{},
};
#endif
@@ -772,11 +769,22 @@ static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1530923
/* Cortex A55 r0p0 to r2p0 */
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+ /* Kryo4xx Silver (rdpe => r1p0) */
+ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
{},
};
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+static const struct midr_range erratum_1463225[] = {
+ /* Cortex-A76 r0p0 - r3p1 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+ /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+ MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -916,6 +924,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.capability = ARM64_WORKAROUND_1463225,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_cortex_a76_erratum_1463225,
+ .midr_range_list = erratum_1463225,
},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9f63053..9fae0ef 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1408,6 +1408,8 @@ static bool cpu_has_broken_dbm(void)
static const struct midr_range cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_1024718
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0
+ /* Kryo4xx Silver (rdpe => r1p0) */
+ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
{},
};
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index cbe49cd1..5290f17 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -122,7 +122,7 @@ void *alloc_insn_page(void)
{
return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
- NUMA_NO_NODE, __func__);
+ NUMA_NO_NODE, __builtin_return_address(0));
}
/* arm kprobe: install breakpoint in text */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 6827da7..5423ffe 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -165,9 +165,6 @@
*(.altinstructions)
__alt_instructions_end = .;
}
- .altinstr_replacement : {
- *(.altinstr_replacement)
- }
. = ALIGN(SEGMENT_ALIGN);
__inittext_end = .;
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 6e6ed55..e76c0e8 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -136,11 +136,15 @@
1: cmp x0, #HVC_RESET_VECTORS
b.ne 1f
-reset:
+
/*
- * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
- * case we coming via HVC_SOFT_RESTART.
+ * Set the HVC_RESET_VECTORS return code before entering the common
+ * path so that we do not clobber x0-x2 in case we are coming via
+ * HVC_SOFT_RESTART.
*/
+ mov x0, xzr
+reset:
+ /* Reset kvm back to the hyp stub. */
mrs x5, sctlr_el2
mov_q x6, SCTLR_ELx_FLAGS
bic x5, x5, x6 // Clear SCTL_M and etc
@@ -151,7 +155,6 @@
/* Install stub vectors */
adr_l x5, __hyp_stub_vectors
msr vbar_el2, x5
- mov x0, xzr
eret
1: /* Bad stub call */
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index b5ae3a5..3c22416 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
}
/*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
if (!has_vhe())
return;
+ preempt_disable();
host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
kvm_vcpu_pmu_enable_el0(events_guest);
kvm_vcpu_pmu_disable_el0(events_host);
+ preempt_enable();
}
/*
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index 1e0f4c2..f7b52ce 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -3,6 +3,7 @@
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
return base;
}
+static bool kvm_arm_pvtime_supported(void)
+{
+ return !!sched_info_on();
+}
+
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
int ret = 0;
int idx;
- if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+ if (!kvm_arm_pvtime_supported() ||
+ attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
return -ENXIO;
if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
u64 __user *user = (u64 __user *)attr->addr;
u64 ipa;
- if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+ if (!kvm_arm_pvtime_supported() ||
+ attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
return -ENXIO;
ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
{
switch (attr->attr) {
case KVM_ARM_VCPU_PVTIME_IPA:
- return 0;
+ if (kvm_arm_pvtime_supported())
+ return 0;
}
return -ENXIO;
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index d3b2090..6ed36be5 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -245,7 +245,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
- int ret = -EINVAL;
+ int ret;
bool loaded;
u32 pstate;
@@ -269,15 +269,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
- if (kvm_vcpu_enable_ptrauth(vcpu))
+ if (kvm_vcpu_enable_ptrauth(vcpu)) {
+ ret = -EINVAL;
goto out;
+ }
}
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
- if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+ if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+ ret = -EINVAL;
goto out;
+ }
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 27ac833..b5fa73c 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq);
+ /*
+ * The v4.1 doorbell can fire concurrently with the vPE being
+ * made non-resident. Ensure we only update pending_last
+ * *after* the non-residency sequence has completed.
+ */
+ raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+ raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
+
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
diff --git a/arch/mips/boot/dts/ingenic/gcw0.dts b/arch/mips/boot/dts/ingenic/gcw0.dts
index 8d22828..bc72304 100644
--- a/arch/mips/boot/dts/ingenic/gcw0.dts
+++ b/arch/mips/boot/dts/ingenic/gcw0.dts
@@ -92,7 +92,7 @@ sound {
"MIC1N", "Built-in Mic";
simple-audio-card,pin-switches = "Speaker", "Headphones";
- simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_HIGH>;
+ simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_LOW>;
simple-audio-card,aux-devs = <&speaker_amp>, <&headphones_amp>;
simple-audio-card,bitclock-master = <&dai_codec>;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 7c32c95..f655af6 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -723,12 +723,14 @@ static int simulate_loongson3_cpucfg(struct pt_regs *regs,
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
/* Do not emulate on unsupported core models. */
- if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data))
+ preempt_disable();
+ if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
+ preempt_enable();
return -1;
-
+ }
regs->regs[rd] = loongson3_cpucfg_read_synthesized(
&current_cpu_data, sel);
-
+ preempt_enable();
return 0;
}
@@ -2169,6 +2171,7 @@ static void configure_status(void)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
+ back_to_back_c0_hazard();
}
unsigned int hwrena;
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index aa37545..b103420 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -514,8 +514,8 @@ void __init ltq_soc_init(void)
clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
PMU_PPE_DP | PMU_PPE_TC);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
- clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
- clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
@@ -538,8 +538,8 @@ void __init ltq_soc_init(void)
PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
PMU_PPE_QSB | PMU_PPE_TOP);
- clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
- clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index 1199fc2..ca5fcb4 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -353,9 +353,6 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
int pkey_shift;
u64 amr;
- if (!is_pkey_enabled(pkey))
- return true;
-
pkey_shift = pkeyshift(pkey);
if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift)))
return true;
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 46038bc..0cf9a82 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -1,5 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_WATCH_QUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
@@ -14,7 +15,6 @@
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
@@ -31,9 +31,9 @@
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
-CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_LSM=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -51,14 +51,11 @@
CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_S390_UNWIND_SELFTEST=y
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
@@ -77,6 +74,8 @@
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
@@ -96,7 +95,6 @@
CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
-CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
@@ -130,6 +128,7 @@
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESPINTCP=y
CONFIG_INET_IPCOMP=m
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
@@ -144,6 +143,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESPINTCP=y
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
CONFIG_IPV6_VTI=m
@@ -151,7 +151,10 @@
CONFIG_IPV6_GRE=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_MPTCP=y
CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@@ -317,6 +320,7 @@
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_MRP=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_NET_SCHED=y
@@ -341,6 +345,7 @@
CONFIG_NET_SCH_FQ_CODEL=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
@@ -364,6 +369,7 @@
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GATE=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m
@@ -374,6 +380,7 @@
CONFIG_NET_PKTGEN=m
# CONFIG_NET_DROP_MONITOR is not set
CONFIG_PCI=y
+# CONFIG_PCIEASPM is not set
CONFIG_PCI_DEBUG=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
@@ -435,6 +442,7 @@
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
CONFIG_DM_DELAY=m
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
@@ -448,6 +456,8 @@
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_BAREUDP=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
@@ -481,7 +491,6 @@
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
-# CONFIG_MLXFW is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -514,6 +523,7 @@
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -561,6 +571,8 @@
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
@@ -608,6 +620,7 @@
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
+CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
@@ -650,8 +663,8 @@
CONFIG_DLM=m
CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_FORTIFY_SOURCE=y
@@ -675,8 +688,11 @@
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -685,6 +701,7 @@
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
@@ -701,6 +718,7 @@
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
@@ -719,6 +737,9 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_STATS=y
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
@@ -774,6 +795,7 @@
CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y
+CONFIG_TEST_LOCKUP=m
CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
@@ -786,7 +808,9 @@
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
+# CONFIG_RCU_TRACE is not set
CONFIG_LATENCYTOP=y
+CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
CONFIG_IRQSOFF_TRACER=y
@@ -808,10 +832,12 @@
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_MIN_HEAP=y
CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 7cd0648..5df9759 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -1,5 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_WATCH_QUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
@@ -13,7 +14,6 @@
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
@@ -30,9 +30,9 @@
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
-CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_LSM=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -41,7 +41,6 @@
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
-# CONFIG_NUMA_EMU is not set
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
@@ -51,14 +50,11 @@
CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
@@ -74,6 +70,8 @@
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
@@ -91,7 +89,6 @@
CONFIG_FRONTSWAP=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
-CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
@@ -125,6 +122,7 @@
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESPINTCP=y
CONFIG_INET_IPCOMP=m
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
@@ -139,6 +137,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESPINTCP=y
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
CONFIG_IPV6_VTI=m
@@ -146,7 +145,10 @@
CONFIG_IPV6_GRE=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_MPTCP=y
CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@@ -311,6 +313,7 @@
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_MRP=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_NET_SCHED=y
@@ -335,6 +338,7 @@
CONFIG_NET_SCH_FQ_CODEL=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
@@ -358,6 +362,7 @@
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GATE=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m
@@ -368,6 +373,7 @@
CONFIG_NET_PKTGEN=m
# CONFIG_NET_DROP_MONITOR is not set
CONFIG_PCI=y
+# CONFIG_PCIEASPM is not set
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_UEVENT_HELPER=y
@@ -430,6 +436,7 @@
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
CONFIG_DM_DELAY=m
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
@@ -444,6 +451,8 @@
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_BAREUDP=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
@@ -477,7 +486,6 @@
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
-# CONFIG_MLXFW is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -510,6 +518,7 @@
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -557,6 +566,8 @@
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
@@ -600,6 +611,7 @@
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
+CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
@@ -642,8 +654,8 @@
CONFIG_DLM=m
CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
@@ -667,8 +679,11 @@
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
@@ -678,6 +693,7 @@
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
@@ -694,6 +710,7 @@
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
@@ -712,6 +729,9 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_STATS=y
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
@@ -725,6 +745,7 @@
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CORDIC=m
+CONFIG_PRIME_NUMBERS=m
CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
@@ -739,10 +760,12 @@
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y
+CONFIG_TEST_LOCKUP=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
+CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
CONFIG_SCHED_TRACER=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 20c51e5..4091c50 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -30,6 +30,7 @@
# CONFIG_BOUNCE is not set
CONFIG_NET=y
# CONFIG_IUCV is not set
+# CONFIG_ETHTOOL_NETLINK is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_BLK_DEV_XPRAM is not set
@@ -55,6 +56,8 @@
# CONFIG_MONWRITER is not set
# CONFIG_S390_VMUR is not set
# CONFIG_HID is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_VHOST_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
@@ -62,7 +65,9 @@
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
+# CONFIG_ZLIB_DFLTCC is not set
CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index cee3cb6..6ea0820 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -31,12 +31,12 @@
#define KVM_USER_MEM_SLOTS 32
/*
- * These seem to be used for allocating ->chip in the routing table,
- * which we don't use. 4096 is an out-of-thin-air value. If we need
- * to look at ->chip later on, we'll need to revisit this.
+ * These seem to be used for allocating ->chip in the routing table, which we
+ * don't use. 1 is as small as we can get to reduce the needed memory. If we
+ * need to look at ->chip later on, we'll need to revisit this.
*/
#define KVM_NR_IRQCHIPS 1
-#define KVM_IRQCHIP_NUM_PINS 4096
+#define KVM_IRQCHIP_NUM_PINS 1
#define KVM_HALT_POLL_NS_DEFAULT 50000
/* s390-specific vcpu->requests bit members */
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 85a711d..4f9e462 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -881,12 +881,21 @@ static int __hw_perf_event_init(struct perf_event *event)
return err;
}
+static bool is_callchain_event(struct perf_event *event)
+{
+ u64 sample_type = event->attr.sample_type;
+
+ return sample_type & (PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER |
+ PERF_SAMPLE_STACK_USER);
+}
+
static int cpumsf_pmu_event_init(struct perf_event *event)
{
int err;
/* No support for taken branch sampling */
- if (has_branch_stack(event))
+ /* No support for callchain, stacks and registers */
+ if (has_branch_stack(event) || is_callchain_event(event))
return -EOPNOTSUPP;
switch (event->attr.type) {
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5853c98..07aa15b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1100,6 +1100,7 @@ void __init setup_arch(char **cmdline_p)
if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
nospec_auto_detect();
+ jump_label_init();
parse_early_param();
#ifdef CONFIG_CRASH_DUMP
/* Deactivate elfcorehdr= kernel parameter */
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 22a0be6..1d17413 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -62,11 +62,15 @@ notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
long copied;
spin_lock_irqsave(&s390_kernel_write_lock, flags);
- while (size) {
- copied = s390_kernel_write_odd(tmp, src, size);
- tmp += copied;
- src += copied;
- size -= copied;
+ if (!(flags & PSW_MASK_DAT)) {
+ memcpy(dst, src, size);
+ } else {
+ while (size) {
+ copied = s390_kernel_write_odd(tmp, src, size);
+ tmp += copied;
+ src += copied;
+ size -= copied;
+ }
}
spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 08e1d61..fdebd28 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -94,7 +94,18 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
}
zdev->fh = ccdf->fh;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
- zpci_create_device(zdev);
+ ret = zpci_enable_device(zdev);
+ if (ret)
+ break;
+
+ pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
+ if (!pdev)
+ break;
+
+ pci_bus_add_device(pdev);
+ pci_lock_rescan_remove();
+ pci_bus_add_devices(zdev->zbus->bus);
+ pci_unlock_rescan_remove();
break;
case 0x0302: /* Reserved -> Standby */
if (!zdev) {
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index bd3f141..e83b3f1 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -45,6 +45,32 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
+/* Check that the stack and regs on entry from user mode are sane. */
+static void check_user_regs(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
+ /*
+ * Make sure that the entry code gave us a sensible EFLAGS
+ * register. Native because we want to check the actual CPU
+ * state, not the interrupt state as imagined by Xen.
+ */
+ unsigned long flags = native_save_fl();
+ WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
+ X86_EFLAGS_NT));
+
+ /* We think we came from user mode. Make sure pt_regs agrees. */
+ WARN_ON_ONCE(!user_mode(regs));
+
+ /*
+ * All entries from user mode (except #DF) should be on the
+ * normal thread stack and should have user pt_regs in the
+ * correct location.
+ */
+ WARN_ON_ONCE(!on_thread_stack());
+ WARN_ON_ONCE(regs != task_pt_regs(current));
+ }
+}
+
#ifdef CONFIG_CONTEXT_TRACKING
/**
* enter_from_user_mode - Establish state when coming from user mode
@@ -127,9 +153,6 @@ static long syscall_trace_enter(struct pt_regs *regs)
unsigned long ret = 0;
u32 work;
- if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
- BUG_ON(regs != task_pt_regs(current));
-
work = READ_ONCE(ti->flags);
if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
@@ -346,6 +369,8 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
struct thread_info *ti;
+ check_user_regs(regs);
+
enter_from_user_mode();
instrumentation_begin();
@@ -409,6 +434,8 @@ static void do_syscall_32_irqs_on(struct pt_regs *regs)
/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
+ check_user_regs(regs);
+
enter_from_user_mode();
instrumentation_begin();
@@ -460,6 +487,8 @@ __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
vdso_image_32.sym_int80_landing_pad;
bool success;
+ check_user_regs(regs);
+
/*
* SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
* so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
@@ -510,6 +539,18 @@ __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
+
+/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
+__visible noinstr long do_SYSENTER_32(struct pt_regs *regs)
+{
+ /* SYSENTER loses RSP, but the vDSO saved it in RBP. */
+ regs->sp = regs->bp;
+
+ /* SYSENTER clobbers EFLAGS.IF. Assume it was set in usermode. */
+ regs->flags |= X86_EFLAGS_IF;
+
+ return do_fast_syscall_32(regs);
+}
#endif
SYSCALL_DEFINE0(ni_syscall)
@@ -553,6 +594,7 @@ SYSCALL_DEFINE0(ni_syscall)
bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
{
if (user_mode(regs)) {
+ check_user_regs(regs);
enter_from_user_mode();
return false;
}
@@ -686,6 +728,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
*/
void noinstr idtentry_enter_user(struct pt_regs *regs)
{
+ check_user_regs(regs);
enter_from_user_mode();
}
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 024d7d2..2d0bd5d 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -933,9 +933,8 @@
.Lsysenter_past_esp:
pushl $__USER_DS /* pt_regs->ss */
- pushl %ebp /* pt_regs->sp (stashed in bp) */
+ pushl $0 /* pt_regs->sp (placeholder) */
pushfl /* pt_regs->flags (except IF = 0) */
- orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
pushl $__USER_CS /* pt_regs->cs */
pushl $0 /* pt_regs->ip = 0 (placeholder) */
pushl %eax /* pt_regs->orig_ax */
@@ -965,7 +964,7 @@
.Lsysenter_flags_fixed:
movl %esp, %eax
- call do_fast_syscall_32
+ call do_SYSENTER_32
/* XEN PV guests always use IRET path */
ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
"jmp .Lsyscall_32_done", X86_FEATURE_XENPV
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 0f974ae..541fdaf 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -57,6 +57,21 @@
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ /* Construct struct pt_regs on stack */
+ pushq $__USER32_DS /* pt_regs->ss */
+ pushq $0 /* pt_regs->sp = 0 (placeholder) */
+
+ /*
+ * Push flags. This is nasty. First, interrupts are currently
+ * off, but we need pt_regs->flags to have IF set. Second, if TS
+ * was set in usermode, it's still set, and we're singlestepping
+ * through this code. do_SYSENTER_32() will fix up IF.
+ */
+ pushfq /* pt_regs->flags (except IF = 0) */
+ pushq $__USER32_CS /* pt_regs->cs */
+ pushq $0 /* pt_regs->ip = 0 (placeholder) */
+SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+
/*
* User tracing code (ptrace or signal handlers) might assume that
* the saved RAX contains a 32-bit number when we're invoking a 32-bit
@@ -66,20 +81,6 @@
*/
movl %eax, %eax
- /* Construct struct pt_regs on stack */
- pushq $__USER32_DS /* pt_regs->ss */
- pushq %rbp /* pt_regs->sp (stashed in bp) */
-
- /*
- * Push flags. This is nasty. First, interrupts are currently
- * off, but we need pt_regs->flags to have IF set. Second, even
- * if TF was set when SYSENTER started, it's clear by now. We fix
- * that later using TIF_SINGLESTEP.
- */
- pushfq /* pt_regs->flags (except IF = 0) */
- orl $X86_EFLAGS_IF, (%rsp) /* Fix saved flags */
- pushq $__USER32_CS /* pt_regs->cs */
- pushq $0 /* pt_regs->ip = 0 (placeholder) */
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
@@ -135,7 +136,7 @@
.Lsysenter_flags_fixed:
movq %rsp, %rdi
- call do_fast_syscall_32
+ call do_SYSENTER_32
/* XEN PV guests always use IRET path */
ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \
"jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 2bdc72e..6035df1 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -377,7 +377,8 @@ void __init hyperv_init(void)
hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
- VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, __func__);
+ VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
+ __builtin_return_address(0));
if (hv_hypercall_pg == NULL) {
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
goto remove_cpuhp_state;
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 42159f4..845e748 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -623,6 +623,11 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
* MXCSR and XCR definitions:
*/
+static inline void ldmxcsr(u32 mxcsr)
+{
+ asm volatile("ldmxcsr %0" :: "m" (mxcsr));
+}
+
extern unsigned int mxcsr_feature_mask;
#define XCR_XFEATURE_ENABLED_MASK 0x00000000
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index cf51c50..eeac6dc 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -353,10 +353,6 @@ static __always_inline void __##func(struct pt_regs *regs)
#else /* CONFIG_X86_64 */
-/* Maps to a regular IDTENTRY on 32bit for now */
-# define DECLARE_IDTENTRY_IST DECLARE_IDTENTRY
-# define DEFINE_IDTENTRY_IST DEFINE_IDTENTRY
-
/**
* DECLARE_IDTENTRY_DF - Declare functions for double fault 32bit variant
* @vector: Vector number (ignored for C)
@@ -387,28 +383,18 @@ __visible noinstr void func(struct pt_regs *regs, \
#endif /* !CONFIG_X86_64 */
/* C-Code mapping */
+#define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW
+#define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW
+
+#ifdef CONFIG_X86_64
#define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST
#define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST
#define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST
-#define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW
-#define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW
-
#define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST
#define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST
#define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST
-
-/**
- * DECLARE_IDTENTRY_XEN - Declare functions for XEN redirect IDT entry points
- * @vector: Vector number (ignored for C)
- * @func: Function name of the entry point
- *
- * Used for xennmi and xendebug redirections. No DEFINE as this is all ASM
- * indirection magic.
- */
-#define DECLARE_IDTENTRY_XEN(vector, func) \
- asmlinkage void xen_asm_exc_xen##func(void); \
- asmlinkage void asm_exc_xen##func(void)
+#endif
#else /* !__ASSEMBLY__ */
@@ -455,9 +441,6 @@ __visible noinstr void func(struct pt_regs *regs, \
# define DECLARE_IDTENTRY_MCE(vector, func) \
DECLARE_IDTENTRY(vector, func)
-# define DECLARE_IDTENTRY_DEBUG(vector, func) \
- DECLARE_IDTENTRY(vector, func)
-
/* No ASM emitted for DF as this goes through a C shim */
# define DECLARE_IDTENTRY_DF(vector, func)
@@ -469,10 +452,6 @@ __visible noinstr void func(struct pt_regs *regs, \
/* No ASM code emitted for NMI */
#define DECLARE_IDTENTRY_NMI(vector, func)
-/* XEN NMI and DB wrapper */
-#define DECLARE_IDTENTRY_XEN(vector, func) \
- idtentry vector asm_exc_xen##func exc_##func has_error_code=0
-
/*
* ASM code to emit the common vector entry stubs where each stub is
* packed into 8 bytes.
@@ -565,16 +544,28 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_BP, exc_int3);
DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF, exc_page_fault);
#ifdef CONFIG_X86_MCE
+#ifdef CONFIG_X86_64
DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check);
+#else
+DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check);
+#endif
#endif
/* NMI */
DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
-DECLARE_IDTENTRY_XEN(X86_TRAP_NMI, nmi);
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi);
+#endif
/* #DB */
+#ifdef CONFIG_X86_64
DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug);
-DECLARE_IDTENTRY_XEN(X86_TRAP_DB, debug);
+#else
+DECLARE_IDTENTRY_RAW(X86_TRAP_DB, exc_debug);
+#endif
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug);
+#endif
/* #DF */
DECLARE_IDTENTRY_DF(X86_TRAP_DF, exc_double_fault);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c25a67a..0ab48f1c 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -50,6 +50,13 @@ static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;
/*
+ * With a name like MSR_TEST_CTL it should go without saying, but don't touch
+ * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it
+ * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
+ */
+static bool cpu_model_supports_sld __ro_after_init;
+
+/*
* Processors which have self-snooping capability can handle conflicting
* memory type across CPUs by snooping its own cache. However, there exists
* CPU models in which having conflicting memory types still leads to
@@ -1071,7 +1078,8 @@ static void sld_update_msr(bool on)
static void split_lock_init(void)
{
- split_lock_verify_msr(sld_state != sld_off);
+ if (cpu_model_supports_sld)
+ split_lock_verify_msr(sld_state != sld_off);
}
static void split_lock_warn(unsigned long ip)
@@ -1177,5 +1185,6 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
return;
}
+ cpu_model_supports_sld = true;
split_lock_setup();
}
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index fbe89a9..14e4b4d 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1901,6 +1901,8 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
{
+ WARN_ON_ONCE(user_mode(regs));
+
/*
* Only required when from kernel mode. See
* mce_check_crashing_cpu() for details.
@@ -1954,7 +1956,7 @@ DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
}
#else
/* 32bit unified entry point */
-DEFINE_IDTENTRY_MCE(exc_machine_check)
+DEFINE_IDTENTRY_RAW(exc_machine_check)
{
unsigned long dr7;
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 06c8189..15247b9 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -101,6 +101,12 @@ void kernel_fpu_begin(void)
copy_fpregs_to_fpstate(&current->thread.fpu);
}
__cpu_invalidate_fpregs_state();
+
+ if (boot_cpu_has(X86_FEATURE_XMM))
+ ldmxcsr(MXCSR_DEFAULT);
+
+ if (boot_cpu_has(X86_FEATURE_FPU))
+ asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);
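With this change, kernel_fpu_begin() hands callers a defined FPU state (MXCSR reset to its default, x87 reinitialized) instead of whatever the interrupted task left behind. A minimal sketch of the usual calling pattern; the caller name below is hypothetical and only for illustration:

    #include <asm/fpu/api.h>

    static void do_simd_work(void)          /* hypothetical example caller */
    {
            kernel_fpu_begin();             /* SIMD/x87 state now starts from known defaults */
            /* ... SSE/AVX work that must not depend on leftover user FPU state ... */
            kernel_fpu_end();
    }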
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 8748321..b8aee71 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -29,6 +29,8 @@
#include <asm/mmu_context.h>
#include <asm/pgtable_areas.h>
+#include <xen/xen.h>
+
/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
@@ -543,6 +545,28 @@ static int read_default_ldt(void __user *ptr, unsigned long bytecount)
return bytecount;
}
+static bool allow_16bit_segments(void)
+{
+ if (!IS_ENABLED(CONFIG_X86_16BIT))
+ return false;
+
+#ifdef CONFIG_XEN_PV
+ /*
+ * Xen PV does not implement ESPFIX64, which means that 16-bit
+ * segments will not work correctly. Until either Xen PV implements
+ * ESPFIX64 and can signal this fact to the guest, or someone
+ * provides compelling evidence that allowing broken 16-bit segments
+ * is worthwhile, disallow 16-bit segments under Xen PV.
+ */
+ if (xen_pv_domain()) {
+ pr_info_once("Warning: 16-bit segments do not work correctly in a Xen PV guest\n");
+ return false;
+ }
+#endif
+
+ return true;
+}
+
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
struct mm_struct *mm = current->mm;
@@ -574,7 +598,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
/* The user wants to clear the entry. */
memset(&ldt, 0, sizeof(ldt));
} else {
- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+ if (!ldt_info.seg_32bit && !allow_16bit_segments()) {
error = -EINVAL;
goto out;
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index f58679e..b038695 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -870,6 +870,12 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
trace_hardirqs_off_finish();
/*
+ * If something gets miswired and we end up here for a user mode
+ * #DB, we will malfunction.
+ */
+ WARN_ON_ONCE(user_mode(regs));
+
+ /*
* Catch SYSENTER with TF set and clear DR_STEP. If this hit a
* watchpoint at the same time then that will still be handled.
*/
@@ -887,6 +893,12 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
static __always_inline void exc_debug_user(struct pt_regs *regs,
unsigned long dr6)
{
+ /*
+ * If something gets miswired and we end up here for a kernel mode
+ * #DB, we will malfunction.
+ */
+ WARN_ON_ONCE(!user_mode(regs));
+
idtentry_enter_user(regs);
instrumentation_begin();
@@ -917,7 +929,7 @@ DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
}
#else
/* 32 bit does not have separate entry points. */
-DEFINE_IDTENTRY_DEBUG(exc_debug)
+DEFINE_IDTENTRY_RAW(exc_debug)
{
unsigned long dr6, dr7;
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index ff2d0e9..cfe83d4 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -7,7 +7,7 @@
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
+ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 76817d1..6d6a0ae 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4449,7 +4449,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
- nonleaf_bit8_rsvd | gbpages_bit_rsvd |
+ gbpages_bit_rsvd |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d1af20b..b266551 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4109,7 +4109,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
* CR0_GUEST_HOST_MASK is already set in the original vmcs01
* (KVM doesn't change it);
*/
- vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
vmx_set_cr0(vcpu, vmcs12->host_cr0);
/* Same as above - no reason to call set_cr4_guest_host_mask(). */
@@ -4259,7 +4259,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
*/
vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
- vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index cb22f33..13745f2 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -133,9 +133,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#define KVM_VM_CR0_ALWAYS_ON \
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \
X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
-#define KVM_CR4_GUEST_OWNED_BITS \
- (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@@ -4034,9 +4031,9 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
{
- vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
- if (enable_ept)
- vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+ vmx->vcpu.arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS;
+ if (!enable_ept)
+ vmx->vcpu.arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
if (is_guest_mode(&vmx->vcpu))
vmx->vcpu.arch.cr4_guest_owned_bits &=
~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
@@ -4333,8 +4330,8 @@ static void init_vmcs(struct vcpu_vmx *vmx)
/* 22.2.1, 20.8.1 */
vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
- vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
- vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
+ vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
set_cr4_guest_host_mask(vmx);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3b92db4..88c593f8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -975,6 +975,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
+ if ((cr4 ^ old_cr4) & X86_CR4_LA57)
+ return 1;
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
@@ -2693,6 +2695,9 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
if (data & 0x30)
return 1;
+ if (!lapic_in_kernel(vcpu))
+ return 1;
+
vcpu->arch.apf.msr_en_val = data;
if (!kvm_pv_async_pf_enabled(vcpu)) {
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index acc49fa..0d68948 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -598,6 +598,26 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
}
#ifdef CONFIG_X86_64
+void noist_exc_debug(struct pt_regs *regs);
+
+DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
+{
+ /* On Xen PV, NMI doesn't use IST. The C part is the same as native. */
+ exc_nmi(regs);
+}
+
+DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
+{
+ /*
+ * There's no IST on Xen PV, but we still need to dispatch
+ * to the correct handler.
+ */
+ if (user_mode(regs))
+ noist_exc_debug(regs);
+ else
+ exc_debug(regs);
+}
+
struct trap_array_entry {
void (*orig)(void);
void (*xen)(void);
@@ -609,18 +629,18 @@ struct trap_array_entry {
.xen = xen_asm_##func, \
.ist_okay = ist_ok }
-#define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) { \
+#define TRAP_ENTRY_REDIR(func, ist_ok) { \
.orig = asm_##func, \
- .xen = xen_asm_##xenfunc, \
+ .xen = xen_asm_xenpv_##func, \
.ist_okay = ist_ok }
static struct trap_array_entry trap_array[] = {
- TRAP_ENTRY_REDIR(exc_debug, exc_xendebug, true ),
+ TRAP_ENTRY_REDIR(exc_debug, true ),
TRAP_ENTRY(exc_double_fault, true ),
#ifdef CONFIG_X86_MCE
TRAP_ENTRY(exc_machine_check, true ),
#endif
- TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi, true ),
+ TRAP_ENTRY_REDIR(exc_nmi, true ),
TRAP_ENTRY(exc_int3, false ),
TRAP_ENTRY(exc_overflow, false ),
#ifdef CONFIG_IA32_EMULATION
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 5d252aa..aab1d99 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -29,10 +29,9 @@
.endm
xen_pv_trap asm_exc_divide_error
-xen_pv_trap asm_exc_debug
-xen_pv_trap asm_exc_xendebug
+xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
-xen_pv_trap asm_exc_xennmi
+xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
@@ -161,10 +160,22 @@
/* 32-bit compat sysenter target */
SYM_FUNC_START(xen_sysenter_target)
- mov 0*8(%rsp), %rcx
- mov 1*8(%rsp), %r11
- mov 5*8(%rsp), %rsp
- jmp entry_SYSENTER_compat
+ /*
+ * NB: Xen is polite and clears TF from EFLAGS for us. This means
+ * that we don't need to guard against single step exceptions here.
+ */
+ popq %rcx
+ popq %r11
+
+ /*
+ * Neither Xen nor the kernel really knows what the old SS and
+ * CS were. The kernel expects __USER32_DS and __USER32_CS, so
+ * report those values even though Xen will guess its own values.
+ */
+ movq $__USER32_DS, 4*8(%rsp)
+ movq $__USER32_CS, 1*8(%rsp)
+
+ jmp entry_SYSENTER_compat_after_hwframe
SYM_FUNC_END(xen_sysenter_target)
#else /* !CONFIG_IA32_EMULATION */
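A sketch of the stack layout the rewritten xen_sysenter_target relies on, assuming the standard 64-bit IRET-style hardware frame that Xen mimics for event delivery (the offsets are inferred from the stores above, not taken from a spec):

    /*
     * After popping %rcx and %r11:
     *
     *   0*8(%rsp)  RIP
     *   1*8(%rsp)  CS      <- overwritten with __USER32_CS
     *   2*8(%rsp)  RFLAGS
     *   3*8(%rsp)  RSP
     *   4*8(%rsp)  SS      <- overwritten with __USER32_DS
     */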
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 4707e90..9ffd7e28 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -24,7 +24,8 @@ void blk_flush_integrity(void)
flush_workqueue(kintegrityd_wq);
}
-void __bio_integrity_free(struct bio_set *bs, struct bio_integrity_payload *bip)
+static void __bio_integrity_free(struct bio_set *bs,
+ struct bio_integrity_payload *bip)
{
if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
if (bip->bip_vec)
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 5fab7e3..92b996a 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -228,6 +228,7 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INT3407", 0},
{"INT3532", 0},
{"INTC1047", 0},
+ {"INTC1050", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 873e039..6287338 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -25,8 +25,8 @@ static int acpi_fan_remove(struct platform_device *pdev);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
- {"INT1044", 0},
{"INT3404", 0},
+ {"INTC1044", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index c58ea10f..0b21496 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -235,9 +235,17 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
return tpm_tis_init(&pnp_dev->dev, &tpm_info);
}
+/*
+ * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module
+ * parameter"). This commit added IFX0102 device ID, which is also used by
+ * tpm_infineon but ignored to add quirks to probe which driver ought to be
+ * used.
+ */
+
static struct pnp_device_id tpm_pnp_tbl[] = {
{"PNP0C31", 0}, /* TPM */
{"ATM1200", 0}, /* Atmel */
+ {"IFX0102", 0}, /* Infineon */
{"BCM0101", 0}, /* Broadcom */
{"BCM0102", 0}, /* Broadcom */
{"NSC1200", 0}, /* National */
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 01ce125..4126296 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -54,37 +54,11 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
dentry->d_name.name, ret > 0 ? name : "");
}
-static const struct dentry_operations dma_buf_dentry_ops = {
- .d_dname = dmabuffs_dname,
-};
-
-static struct vfsmount *dma_buf_mnt;
-
-static int dma_buf_fs_init_context(struct fs_context *fc)
-{
- struct pseudo_fs_context *ctx;
-
- ctx = init_pseudo(fc, DMA_BUF_MAGIC);
- if (!ctx)
- return -ENOMEM;
- ctx->dops = &dma_buf_dentry_ops;
- return 0;
-}
-
-static struct file_system_type dma_buf_fs_type = {
- .name = "dmabuf",
- .init_fs_context = dma_buf_fs_init_context,
- .kill_sb = kill_anon_super,
-};
-
-static int dma_buf_release(struct inode *inode, struct file *file)
+static void dma_buf_release(struct dentry *dentry)
{
struct dma_buf *dmabuf;
- if (!is_dma_buf_file(file))
- return -EINVAL;
-
- dmabuf = file->private_data;
+ dmabuf = dentry->d_fsdata;
BUG_ON(dmabuf->vmapping_counter);
@@ -110,9 +84,32 @@ static int dma_buf_release(struct inode *inode, struct file *file)
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);
+}
+
+static const struct dentry_operations dma_buf_dentry_ops = {
+ .d_dname = dmabuffs_dname,
+ .d_release = dma_buf_release,
+};
+
+static struct vfsmount *dma_buf_mnt;
+
+static int dma_buf_fs_init_context(struct fs_context *fc)
+{
+ struct pseudo_fs_context *ctx;
+
+ ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dops = &dma_buf_dentry_ops;
return 0;
}
+static struct file_system_type dma_buf_fs_type = {
+ .name = "dmabuf",
+ .init_fs_context = dma_buf_fs_init_context,
+ .kill_sb = kill_anon_super,
+};
+
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
struct dma_buf *dmabuf;
@@ -412,7 +409,6 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
- .release = dma_buf_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 58f9d8c..44f9276 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -204,6 +204,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 11:
+ case 12:
mem_channel_number = igp_info->v11.umachannelnumber;
/* channel width is 64 */
if (vram_width)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 16596a9..02e6f8c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2784,7 +2784,7 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
+ return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
@@ -2819,7 +2819,7 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
+ return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 6f93a6c..d016f50 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2538,10 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
- DC_ERROR("Mode validation failed for stream update!\n");
- dc_release_state(context);
- return;
+ if (update_type > UPDATE_TYPE_FAST) {
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ DC_ERROR("Mode validation failed for stream update!\n");
+ dc_release_state(context);
+ return;
+ }
}
commit_planes_for_stream(
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 2fb9755..c2e0fbb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -522,9 +522,11 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
- ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
- if (ret)
- goto err4;
+ if (adev->psp.ras.ras) {
+ ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+ if (ret)
+ goto err4;
+ }
return 0;
@@ -560,7 +562,8 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
(struct vega20_smumgr *)(hwmgr->smu_backend);
struct amdgpu_device *adev = hwmgr->adev;
- smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
+ if (adev->psp.ras.ras)
+ smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 619f814..58b89ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -61,7 +61,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- int ret;
+ int ret = 0;
if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
@@ -92,7 +92,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
if (ret)
clear_dma_max_seg_size(subdrv_dev);
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fcee33a..03be314 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1498,7 +1498,6 @@ static int g2d_probe(struct platform_device *pdev)
g2d->irq = platform_get_irq(pdev, 0);
if (g2d->irq < 0) {
- dev_err(dev, "failed to get irq\n");
ret = g2d->irq;
goto err_put_clk;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index a86abc1..3821ea7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -269,8 +269,10 @@ static void mic_pre_enable(struct drm_bridge *bridge)
goto unlock;
ret = pm_runtime_get_sync(mic->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(mic->dev);
goto unlock;
+ }
mic_set_path(mic, 1);
diff --git a/drivers/gpu/drm/i915/gt/shaders/README b/drivers/gpu/drm/i915/gt/shaders/README
new file mode 100644
index 0000000..e7e96d7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/README
@@ -0,0 +1,46 @@
+ASM sources for auto generated shaders
+======================================
+
+The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain
+pre-compiled batch chunks that will clear any residual render cache during
+context switch.
+
+They are generated from their respective platform ASM files present in the
+i915/gt/shaders/clear_kernel directory.
+
+The generated .c files should never be modified directly. Instead, any modification
+needs to be made to their respective ASM files, and the build instructions below
+need to be followed.
+
+Building
+========
+
+Environment
+-----------
+
+IGT GPU tools scripts and Mesa's i965 instruction assembler are used for
+building.
+
+Please make sure your Mesa tool is compiled with "-Dtools=intel" and
+"-Ddri-drivers=i965", and run this script from IGT source root directory"
+
+The instructions below assume:
+ * IGT GPU tools source code is located in your home directory (~) as ~/igt
+ * Mesa source code is located in your home directory (~) as ~/mesa
+ and built under the ~/mesa/build directory
+ * Linux kernel source code is under your home directory (~) as ~/linux
+
+Instructions
+------------
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \
+ ~/igt/lib/i915/shaders/clear_kernel/ivb.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g ivb \
+ -m ~/mesa/build/src/intel/tools/i965_asm
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \
+ ~/igt/lib/i915/shaders/clear_kernel/hsw.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g hsw \
+ -m ~/mesa/build/src/intel/tools/i965_asm
\ No newline at end of file
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
new file mode 100644
index 0000000..5fdf384
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ * 1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ * 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ * 512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ * Expected to be initialized to 0 by driver/another kernel
+ * Layout:
+ * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
+(+f0.0) jmpi(1) 352D { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8) g3<1>UD 0x00000000UD { align1 1Q };
+shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
+and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
+shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
+and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
+mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
+add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
+and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
+mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
+add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
+
+mov(8) g5<1>UD 0x00000000UD { align1 1Q };
+and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
+mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
+
+mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
+mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
+
+ render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+
+/* Delay thread for specified parameter */
+add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
+(+f0.0) jmpi(1) -32D { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/* Initialize looping parameters */
+mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
+mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
+mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
+mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
+and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
+mov(16) g3<1>UD 0x00000000UD { align1 1H };
+mov(16) g4<1>UD 0x00000000UD { align1 1H };
+mov(16) g5<1>UD 0x00000000UD { align1 1H };
+mov(16) g6<1>UD 0x00000000UD { align1 1H };
+mov(16) g7<1>UD 0x00000000UD { align1 1H };
+mov(16) g8<1>UD 0x00000000UD { align1 1H };
+mov(16) g9<1>UD 0x00000000UD { align1 1H };
+mov(16) g10<1>UD 0x00000000UD { align1 1H };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
+mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
+add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
+(+f0.0) jmpi(1) -64D { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8) null<1>UD g127<8,8,1>F 0x82000010
+ thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
new file mode 100644
index 0000000..97c7ac9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ * 1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ * 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ * 512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ * Expected to be initialized to 0 by driver/another kernel
+ * Layout :
+ * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
+(+f0.0) jmpi(1) 44D { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8) g3<1>UD 0x00000000UD { align1 1Q };
+shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
+and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
+shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
+and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
+mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
+add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
+and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
+mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
+add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
+
+mov(8) g5<1>UD 0x00000000UD { align1 1Q };
+and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
+mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
+
+mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
+mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
+ render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+/* Delay thread for specified parameter */
+add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
+(+f0.0) jmpi(1) -4D { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/* Initialize looping parameters */
+mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
+mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
+mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
+mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
+and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
+mov(16) g3<1>UD 0x00000000UD { align1 1H };
+mov(16) g4<1>UD 0x00000000UD { align1 1H };
+mov(16) g5<1>UD 0x00000000UD { align1 1H };
+mov(16) g6<1>UD 0x00000000UD { align1 1H };
+mov(16) g7<1>UD 0x00000000UD { align1 1H };
+mov(16) g8<1>UD 0x00000000UD { align1 1H };
+mov(16) g9<1>UD 0x00000000UD { align1 1H };
+mov(16) g10<1>UD 0x00000000UD { align1 1H };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
+mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
+add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
+(+f0.0) jmpi(1) -8D { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8) null<1>UD g127<8,8,1>F 0x82000010
+ thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index ec47d41..62e6a14 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -66,7 +66,7 @@ static inline int mmio_diff_handler(struct intel_gvt *gvt,
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
- node = kmalloc(sizeof(*node), GFP_KERNEL);
+ node = kmalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 3e88e3b..fadd2ad 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1726,13 +1726,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
- if (data & _MASKED_BIT_ENABLE(1)) {
+ if (IS_MASKED_BITS_ENABLED(data, 1)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
- data & _MASKED_BIT_ENABLE(2)) {
+ IS_MASKED_BITS_ENABLED(data, 2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
@@ -1741,14 +1741,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* pvinfo, if not, we will treat this guest as non-gvtg-aware
* guest, and stop emulating its cfg space, mmio, gtt, etc.
*/
- if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
- (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
- && !vgpu->pv_notified) {
+ if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
+ IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
+ !vgpu->pv_notified) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
- if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
- || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+ if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
+ IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
gvt_dbg_core("EXECLIST %s on ring %s\n",
@@ -1809,7 +1809,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+ if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
data |= RESET_CTL_READY_TO_RESET;
else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
data &= ~RESET_CTL_READY_TO_RESET;
@@ -1827,7 +1827,8 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
write_vreg(vgpu, offset, p_data, bytes);
- if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+ if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
+ IS_MASKED_BITS_ENABLED(data, 0x8))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
@@ -3055,6 +3056,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
@@ -3131,8 +3133,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
- MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
+ MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 970704b..3b25e7f 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -54,8 +54,8 @@ bool is_inhibit_context(struct intel_context *ce);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
-#define IS_RESTORE_INHIBIT(a) \
- (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
- ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
+
+#define IS_RESTORE_INHIBIT(a) \
+ IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 5b66e14..b88e033 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -94,6 +94,11 @@
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
+#define IS_MASKED_BITS_ENABLED(_val, _b) \
+ (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+#define IS_MASKED_BITS_DISABLED(_val, _b) \
+ ((_val) & _MASKED_BIT_DISABLE(_b))
+
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
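A worked example of why the equality check in IS_MASKED_BITS_ENABLED() matters, assuming the usual i915 masked-register encoding where _MASKED_BIT_ENABLE(b) == ((b) << 16) | (b) and _MASKED_BIT_DISABLE(b) == (b) << 16:

    /*
     * With b = 2 (bit 1):
     *   _MASKED_BIT_ENABLE(2)  == 0x00020002
     *   _MASKED_BIT_DISABLE(2) == 0x00020000
     *
     * A guest write of 0x00020000 (a masked *disable*) still satisfied the old
     * test, which only checked for any overlapping bit:
     *   data & _MASKED_BIT_ENABLE(2)     -> 0x00020000, treated as "enabled"
     * but fails the new one, which requires both the mask bit and the value bit:
     *   IS_MASKED_BITS_ENABLED(data, 2)  -> 0x00020000 != 0x00020002, false
     */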
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 60f6472..6021f8d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -408,7 +408,7 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- SZ_16M + 0xfff * SZ_64K);
+ 0xfff * SZ_64K);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 096be97..21e77d6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1121,7 +1121,7 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
return -ENODEV;
mmu = msm_iommu_new(gmu->dev, domain);
- gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+ gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
if (IS_ERR(gmu->aspace)) {
iommu_domain_free(domain);
return PTR_ERR(gmu->aspace);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index a1589e0..7768557 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -893,8 +893,8 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
#endif
+ .create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a6xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 89673c7..5db06b5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -194,7 +194,7 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- 0xfffffff);
+ 0xffffffff - SZ_16M);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 63976dc..0946a86 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -521,7 +521,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_kms *dpu_kms,
struct drm_display_mode *mode)
{
- struct msm_display_topology topology;
+ struct msm_display_topology topology = {0};
int i, intf_count = 0;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
@@ -537,7 +537,8 @@ static struct msm_display_topology dpu_encoder_get_topology(
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
- * Adding color blocks only to primary interface
+ * Add color blocks only to the primary interface, and only if they are
+ * available in sufficient number
*/
if (intf_count == 2)
topology.num_lm = 2;
@@ -546,8 +547,11 @@ static struct msm_display_topology dpu_encoder_get_topology(
else
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
- if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
- topology.num_dspp = topology.num_lm;
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
+ if (dpu_kms->catalog->dspp &&
+ (dpu_kms->catalog->dspp_count >= topology.num_lm))
+ topology.num_dspp = topology.num_lm;
+ }
topology.num_enc = 0;
topology.num_intf = intf_count;
@@ -2136,7 +2140,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
dpu_enc = to_dpu_encoder_virt(enc);
- mutex_init(&dpu_enc->enc_lock);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
if (ret)
goto fail;
@@ -2151,7 +2154,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
0);
- mutex_init(&dpu_enc->rc_lock);
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
@@ -2183,7 +2185,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
if (!dpu_enc)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
drm_enc_mode, NULL);
@@ -2196,6 +2198,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
spin_lock_init(&dpu_enc->enc_spinlock);
dpu_enc->enabled = false;
+ mutex_init(&dpu_enc->enc_lock);
+ mutex_init(&dpu_enc->rc_lock);
return &dpu_enc->base;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index b8615d4..680527e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -780,7 +780,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
aspace = msm_gem_address_space_create(mmu, "dpu1",
- 0x1000, 0xfffffff);
+ 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
mmu->funcs->destroy(mmu);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 0889718..fc6a3f8 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -514,7 +514,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
config->iommu);
aspace = msm_gem_address_space_create(mmu,
- "mdp4", 0x1000, 0xffffffff);
+ "mdp4", 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 19ec486..e193865 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
aspace = msm_gem_address_space_create(mmu, "mdp5",
- 0x1000, 0xffffffff);
+ 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 001fbf53..a1d94be 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -71,8 +71,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
queue->flags = flags;
if (priv->gpu) {
- if (prio >= priv->gpu->nr_rings)
+ if (prio >= priv->gpu->nr_rings) {
+ kfree(queue);
return -EINVAL;
+ }
queue->prio = prio;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index ce07ddc..557cbe5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -259,9 +259,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
unsigned long reg;
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
- reg & SUN4I_HDMI_HPD_HIGH,
- 0, 500000)) {
+ reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
+ if (reg & SUN4I_HDMI_HPD_HIGH) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 0db8ef4..a270b97 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -883,7 +883,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
res = setup_attrs(resource);
if (res)
- goto exit_free;
+ goto exit_free_capability;
resource->hwmon_dev = hwmon_device_register(&device->dev);
if (IS_ERR(resource->hwmon_dev)) {
@@ -896,6 +896,8 @@ static int acpi_power_meter_add(struct acpi_device *device)
exit_remove:
remove_attrs(resource);
+exit_free_capability:
+ free_capabilities(resource);
exit_free:
kfree(resource);
exit:
diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
index 1a9772f..94698ca 100644
--- a/drivers/hwmon/bt1-pvt.c
+++ b/drivers/hwmon/bt1-pvt.c
@@ -64,7 +64,7 @@ static const struct pvt_sensor_info pvt_info[] = {
* 48380,
* where T = [-48380, 147438] mC and N = [0, 1023].
*/
-static const struct pvt_poly poly_temp_to_N = {
+static const struct pvt_poly __maybe_unused poly_temp_to_N = {
.total_divider = 10000,
.terms = {
{4, 18322, 10000, 10000},
@@ -96,7 +96,7 @@ static const struct pvt_poly poly_N_to_temp = {
* N = (18658e-3*V - 11572) / 10,
* V = N * 10^5 / 18658 + 11572 * 10^4 / 18658.
*/
-static const struct pvt_poly poly_volt_to_N = {
+static const struct pvt_poly __maybe_unused poly_volt_to_N = {
.total_divider = 10,
.terms = {
{1, 18658, 1000, 1},
@@ -300,12 +300,12 @@ static irqreturn_t pvt_soft_isr(int irq, void *data)
return IRQ_HANDLED;
}
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
{
return 0644;
}
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
{
return 0444;
}
@@ -462,12 +462,12 @@ static irqreturn_t pvt_hard_isr(int irq, void *data)
#define pvt_soft_isr NULL
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
{
return 0;
}
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
{
return 0;
}
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 743752a..64122eb 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -38,8 +38,9 @@ static const u8 MAX6697_REG_CRIT[] = {
* Map device tree / platform data register bit map to chip bit map.
* Applies to alert register and over-temperature register.
*/
-#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
+#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
(((reg) & 0x01) << 6) | ((reg) & 0x80))
+#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))
#define MAX6697_REG_STAT(n) (0x44 + (n))
@@ -562,12 +563,12 @@ static int max6697_init_chip(struct max6697_data *data,
return ret;
ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
- MAX6697_MAP_BITS(pdata->alert_mask));
+ MAX6697_ALERT_MAP_BITS(pdata->alert_mask));
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
- MAX6697_MAP_BITS(pdata->over_temperature_mask));
+ MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask));
if (ret < 0)
return ret;
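A quick worked example of the two mapping macros introduced above (the input value is chosen purely for illustration):

    /*
     * reg = 0x01 (bit 0 set in the devicetree/platform-data layout):
     *
     *   MAX6697_ALERT_MAP_BITS(0x01) = ((0x01 & 0x7e) >> 1) | ((0x01 & 0x01) << 6) | (0x01 & 0x80)
     *                                = 0x00 | 0x40 | 0x00 = 0x40   (chip bit 6)
     *   MAX6697_OVERT_MAP_BITS(0x01) = (0x01 >> 1) | ((0x01 & 0x01) << 7)
     *                                = 0x00 | 0x80        = 0x80   (chip bit 7)
     *
     * Bit 0 maps to different chip bits in the alert and over-temperature
     * registers, which is why a single mapping macro could not serve both.
     */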
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index a337195..ea516ce 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -71,7 +71,7 @@
Infineon IR35221 controller.
This driver can also be built as a module. If so, the module will
- be called ir35521.
+ be called ir35221.
config SENSORS_IR38064
tristate "Infineon IR38064"
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a420877..2191575 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1869,7 +1869,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
struct pmbus_sensor *sensor;
sensor = pmbus_add_sensor(data, "fan", "target", index, page,
- PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN,
+ 0xff, PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
false, false, true);
if (!sensor)
@@ -1880,14 +1880,14 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
return 0;
sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
- PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM,
+ 0xff, PMBUS_VIRT_PWM_1 + id, PSC_PWM,
false, false, true);
if (!sensor)
return -ENOMEM;
sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
- PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM,
+ 0xff, PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
true, false, false);
if (!sensor)
@@ -1929,7 +1929,7 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
continue;
if (pmbus_add_sensor(data, "fan", "input", index,
- page, pmbus_fan_registers[f], 0xff,
+ page, 0xff, pmbus_fan_registers[f],
PSC_FAN, true, true, true) == NULL)
return -ENOMEM;
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index ef39c83..bae1dc0 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -113,11 +113,18 @@
config I2C_SLAVE
bool "I2C slave support"
+ help
+ This enables Linux to act as an I2C slave device. Note that your I2C
+ bus master driver also needs to support this functionality. Please
+ read Documentation/i2c/slave-interface.rst for further details.
if I2C_SLAVE
config I2C_SLAVE_EEPROM
tristate "I2C eeprom slave driver"
+ help
+ This backend makes Linux behave like an I2C EEPROM. Please read
+ Documentation/i2c/slave-eeprom-backend.rst for further details.
endif
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 7f10312..3889787 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -314,7 +314,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
DEB2("BUS ERROR - SDA Stuck low\n");
pca_reset(adap);
goto out;
- case 0x90: /* Bus error - SCL stuck low */
+ case 0x78: /* Bus error - SCL stuck low (PCA9665) */
+ case 0x90: /* Bus error - SCL stuck low (PCA9564) */
DEB2("BUS ERROR - SCL Stuck low\n");
pca_reset(adap);
goto out;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index c2efaaa..a71bc58f 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
@@ -191,6 +192,17 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev)
return ret;
}
+static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = {
+ {
+ .ident = "Qtechnology QT5222",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Qtechnology"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "QT5222"),
+ },
+ },
+ { } /* terminate list */
+};
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -267,7 +279,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
adap = &dev->adapter;
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DEPRECATED;
+ adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ?
+ I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
adap->nr = -1;
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index bb810de..73f1396 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -180,6 +180,7 @@ static const struct pci_device_id pch_pcidev_id[] = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
{0,}
};
+MODULE_DEVICE_TABLE(pci, pch_pcidev_id);
static irqreturn_t pch_i2c_handler(int irq, void *pData);
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 2fd717d..71d7bae 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv)
if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) {
mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
&datalen, 1);
- if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) {
+ if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) {
dev_err(priv->dev, "Incorrect smbus block read message len\n");
- return -E2BIG;
+ return -EPROTO;
}
} else {
datalen = priv->xfer.data_len;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 29fead2..216b3b8 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -563,7 +563,7 @@
Support for the Loongson PCH PIC Controller.
config LOONGSON_PCH_MSI
- bool "Loongson PCH PIC Controller"
+ bool "Loongson PCH MSI Controller"
depends on MACH_LOONGSON64 || COMPILE_TEST
depends on PCI
default MACH_LOONGSON64
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index cd685f5..beac4ca 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3797,10 +3797,10 @@ static void its_wait_vpt_parse_complete(void)
if (!gic_rdists->has_vpend_valid_dirty)
return;
- WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
- val,
- !(val & GICR_VPENDBASER_Dirty),
- 10, 500));
+ WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
+ val,
+ !(val & GICR_VPENDBASER_Dirty),
+ 10, 500));
}
static void its_vpe_schedule(struct its_vpe *vpe)
@@ -4054,16 +4054,24 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
u64 val;
if (info->req_db) {
+ unsigned long flags;
+
/*
* vPE is going to block: make the vPE non-resident with
* PendingLast clear and DB set. The GIC guarantees that if
* we read-back PendingLast clear, then a doorbell will be
* delivered when an interrupt comes.
+ *
+ * Note the locking to deal with the update of pending_last from
+ * the doorbell interrupt handler that can run concurrently.
*/
+ raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
val = its_clear_vpend_valid(vlpi_base,
GICR_VPENDBASER_PendingLast,
GICR_VPENDBASER_4_1_DB);
vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+ raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
} else {
/*
* We're not blocking, so just make the vPE non-resident
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 00de05a..c17fabd 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -329,10 +329,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
- unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
- u32 val, mask, bit;
- unsigned long flags;
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ unsigned int cpu;
if (!force)
cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -342,13 +340,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
- gic_lock_irqsave(flags);
- mask = 0xff << shift;
- bit = gic_cpu_map[cpu] << shift;
- val = readl_relaxed(reg) & ~mask;
- writel_relaxed(val | bit, reg);
- gic_unlock_irqrestore(flags);
-
+ writeb_relaxed(gic_cpu_map[cpu], reg);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
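For context, GICD_ITARGETSRn holds one byte per interrupt and the GIC distributor allows byte accesses to these registers, so the per-IRQ target can be stored directly instead of read-modify-writing the containing 32-bit word under the lock; roughly:

    /* before: locked RMW of the word holding four one-byte targets */
    reg   = base + GIC_DIST_TARGET + (irq & ~3);
    shift = (irq % 4) * 8;
    val   = (readl_relaxed(reg) & ~(0xff << shift)) | (gic_cpu_map[cpu] << shift);
    writel_relaxed(val, reg);

    /* after: one byte store to the per-IRQ offset, no lock needed */
    writeb_relaxed(gic_cpu_map[cpu], base + GIC_DIST_TARGET + irq);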
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index a6f97fa..8017f6d 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -99,7 +99,7 @@ static int __init riscv_intc_init(struct device_node *node,
hartid = riscv_of_parent_hartid(node);
if (hartid < 0) {
- pr_warn("unable to fine hart id for %pOF\n", node);
+ pr_warn("unable to find hart id for %pOF\n", node);
return 0;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 68aea22..5216487 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1324,13 +1324,13 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
return 0; /* fw doesn't need any host buffers */
/* spin till we get enough memory */
- while(host_page_buffer_sz > 0) {
-
- if((ioc->HostPageBuffer = pci_alloc_consistent(
- ioc->pcidev,
- host_page_buffer_sz,
- &ioc->HostPageBuffer_dma)) != NULL) {
-
+ while (host_page_buffer_sz > 0) {
+ ioc->HostPageBuffer =
+ dma_alloc_coherent(&ioc->pcidev->dev,
+ host_page_buffer_sz,
+ &ioc->HostPageBuffer_dma,
+ GFP_KERNEL);
+ if (ioc->HostPageBuffer) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
ioc->name, ioc->HostPageBuffer,
@@ -2741,8 +2741,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
sz = ioc->alloc_sz;
dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n",
ioc->name, ioc->alloc, ioc->alloc_sz));
- pci_free_consistent(ioc->pcidev, sz,
- ioc->alloc, ioc->alloc_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+ ioc->alloc_dma);
ioc->reply_frames = NULL;
ioc->req_frames = NULL;
ioc->alloc = NULL;
@@ -2751,8 +2751,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
if (ioc->sense_buf_pool != NULL) {
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
- pci_free_consistent(ioc->pcidev, sz,
- ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+ ioc->sense_buf_pool_dma);
ioc->sense_buf_pool = NULL;
ioc->alloc_total -= sz;
}
@@ -2802,7 +2802,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
"HostPageBuffer free @ %p, sz=%d bytes\n",
ioc->name, ioc->HostPageBuffer,
ioc->HostPageBuffer_sz));
- pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
+ dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz,
ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
ioc->HostPageBuffer = NULL;
ioc->HostPageBuffer_sz = 0;
@@ -4497,7 +4497,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->name, sz, sz, num_chain));
total_size += sz;
- mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
+ mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size,
+ &alloc_dma, GFP_KERNEL);
if (mem == NULL) {
printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
ioc->name);
@@ -4574,8 +4575,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
- ioc->sense_buf_pool =
- pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
+ ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz,
+ &ioc->sense_buf_pool_dma, GFP_KERNEL);
if (ioc->sense_buf_pool == NULL) {
printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
ioc->name);
@@ -4613,18 +4614,16 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
if (ioc->alloc != NULL) {
sz = ioc->alloc_sz;
- pci_free_consistent(ioc->pcidev,
- sz,
- ioc->alloc, ioc->alloc_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+ ioc->alloc_dma);
ioc->reply_frames = NULL;
ioc->req_frames = NULL;
ioc->alloc_total -= sz;
}
if (ioc->sense_buf_pool != NULL) {
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
- pci_free_consistent(ioc->pcidev,
- sz,
- ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+ ioc->sense_buf_pool_dma);
ioc->sense_buf_pool = NULL;
}
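
The mptbase hunks above are one mechanical conversion repeated for every buffer: the legacy pci_alloc_consistent()/pci_free_consistent() pair becomes dma_alloc_coherent()/dma_free_coherent() on the underlying struct device, with an explicit GFP_KERNEL now that the allocation sites run in process context. A minimal sketch of the pattern, using a hypothetical buffer rather than the driver's real fields:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* replaces buf = pci_alloc_consistent(pdev, sz, &dma), which implied GFP_ATOMIC */
static void *example_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t *dma)
{
	return dma_alloc_coherent(&pdev->dev, sz, dma, GFP_KERNEL);
}

/* replaces pci_free_consistent(pdev, sz, buf, dma) */
static void example_free(struct pci_dev *pdev, size_t sz, void *buf,
			 dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, sz, buf, dma);
}
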
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 76d832a..7d93056 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1273,8 +1273,8 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
return -EROFS;
if (!len)
return 0;
- if (!mtd->oops_panic_write)
- mtd->oops_panic_write = true;
+ if (!master->oops_panic_write)
+ master->oops_panic_write = true;
return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
retlen, buf);
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 0a5cb77..f5a53aa 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -1761,7 +1761,7 @@ static void ns_switch_state(struct nandsim *ns)
NS_DBG("switch_state: operation is unknown, try to find it\n");
- if (!ns_find_operation(ns, 0))
+ if (ns_find_operation(ns, 0))
return;
if ((ns->state & ACTION_MASK) &&
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 94bfba9..2925547 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -224,7 +224,7 @@ static int xway_nand_remove(struct platform_device *pdev)
struct nand_chip *chip = &data->chip;
int ret;
- ret = mtd_device_unregister(mtd);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 28f4388c..8410d03 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1116,10 +1116,16 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
dev_warn(ctrl->device,
"Identify Descriptors failed (%d)\n", status);
/*
- * Don't treat an error as fatal, as we potentially already
- * have a NGUID or EUI-64.
+ * Don't treat non-retryable errors as fatal, as we potentially
+ * already have a NGUID or EUI-64. If we failed with DNR set,
+ * we want to silently ignore the error as we can still
+ * identify the device, but if the status does not have DNR
+ * set, we want to propagate the error back specifically for
+ * the disk revalidation flow, to make sure we don't abandon
+ * the device just because of a transient retryable error
+ * (such as a path or transport error).
*/
- if (status > 0 && !(status & NVME_SC_DNR))
+ if (status > 0 && (status & NVME_SC_DNR))
status = 0;
goto free_data;
}
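
A compact restatement of the corrected logic above, as a hypothetical helper rather than the patch itself: a status with the Do Not Retry bit is swallowed because the namespace can still be identified by NGUID or EUI-64, while retryable path or transport errors are handed back so the revalidation flow does not drop the device over a transient failure.

#include <linux/nvme.h>

/* Return 0 to ignore the failure, or the original status to propagate it. */
static inline int filter_desc_status(int status)
{
	if (status > 0 && (status & NVME_SC_DNR))
		return 0;	/* permanent failure: ignore, keep the namespace */
	return status;		/* transient error or negative errno: propagate */
}
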
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 18d084e..6650947 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -672,10 +672,11 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
}
if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
- struct backing_dev_info *info =
- ns->head->disk->queue->backing_dev_info;
+ struct gendisk *disk = ns->head->disk;
- info->capabilities |= BDI_CAP_STABLE_WRITES;
+ if (disk)
+ disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_STABLE_WRITES;
}
}
diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c
index a646fc8..13b26a1 100644
--- a/drivers/s390/cio/vfio_ccw_chp.c
+++ b/drivers/s390/cio/vfio_ccw_chp.c
@@ -8,6 +8,7 @@
* Eric Farman <[email protected]>
*/
+#include <linux/slab.h>
#include <linux/vfio.h>
#include "vfio_ccw_private.h"
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 773c45a..278d15f 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -133,8 +133,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
lockdep_assert_held(&lport->disc.disc_mutex);
rdata = fc_rport_lookup(lport, port_id);
- if (rdata)
+ if (rdata) {
+ kref_put(&rdata->kref, fc_rport_destroy);
return rdata;
+ }
if (lport->rport_priv_size > 0)
rport_priv_size = lport->rport_priv_size;
@@ -481,10 +483,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
fc_rport_state_enter(rdata, RPORT_ST_DELETE);
- kref_get(&rdata->kref);
- if (rdata->event == RPORT_EV_NONE &&
- !queue_work(rport_event_queue, &rdata->event_work))
- kref_put(&rdata->kref, fc_rport_destroy);
+ if (rdata->event == RPORT_EV_NONE) {
+ kref_get(&rdata->kref);
+ if (!queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
rdata->event = event;
}
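
The first fc_rport.c hunk is a reference-count rule: fc_rport_lookup() hands back its result with an extra kref held, so a create-or-lookup wrapper that only wants the pointer must drop that reference before returning, otherwise every repeated create leaks one. A generic sketch with hypothetical types; returning the pointer after the put is safe here only because the caller serialises on disc_mutex:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref kref;
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, kref));
}

/* stand-in for fc_rport_lookup(): returns with an extra reference held */
static struct obj *obj_lookup(void)
{
	return NULL;	/* placeholder */
}

static struct obj *obj_find_or_create(void)
{
	struct obj *o = obj_lookup();

	if (o) {
		/* drop the lookup reference, or each re-create leaks one */
		kref_put(&o->kref, obj_release);
		return o;
	}
	o = kzalloc(sizeof(*o), GFP_KERNEL);
	if (o)
		kref_init(&o->kref);
	return o;
}
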
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4576d3a..2436a17 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5944,7 +5944,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
break;
}
- if (NVME_TARGET(vha->hw, fcport)) {
+ if (found && NVME_TARGET(vha->hw, fcport)) {
if (fcport->disc_state == DSC_DELETE_PEND) {
qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
vha->fcport_count--;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 040d2a4..4f168b4 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -69,11 +69,27 @@ struct xenbus_map_node {
unsigned int nr_handles;
};
+struct map_ring_valloc {
+ struct xenbus_map_node *node;
+
+ /* Why do we need two arrays? See comment of __xenbus_map_ring */
+ union {
+ unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+ pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+ };
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+
+ struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+ unsigned int idx; /* HVM only. */
+};
+
static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);
struct xenbus_ring_ops {
- int (*map)(struct xenbus_device *dev,
+ int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
grant_ref_t *gnt_refs, unsigned int nr_grefs,
void **vaddr);
int (*unmap)(struct xenbus_device *dev, void *vaddr);
@@ -440,8 +456,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
* Map @nr_grefs pages of memory into this domain from another
* domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
* pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address. Returns 0 on success, and GNTST_*
- * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * sets *vaddr to that address. Returns 0 on success, and -errno on
* error. If an error is returned, device will switch to
* XenbusStateClosing and the error message will be saved in XenStore.
*/
@@ -449,12 +464,25 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr)
{
int err;
+ struct map_ring_valloc *info;
- err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
- /* Some hypervisors are buggy and can return 1. */
- if (err > 0)
- err = GNTST_general_error;
+ *vaddr = NULL;
+ if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+ return -EINVAL;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
+ if (!info->node)
+ err = -ENOMEM;
+ else
+ err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
+
+ kfree(info->node);
+ kfree(info);
return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
@@ -466,62 +494,57 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
grant_ref_t *gnt_refs,
unsigned int nr_grefs,
grant_handle_t *handles,
- phys_addr_t *addrs,
+ struct map_ring_valloc *info,
unsigned int flags,
bool *leaked)
{
- struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
- struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
int i, j;
- int err = GNTST_okay;
if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
for (i = 0; i < nr_grefs; i++) {
- memset(&map[i], 0, sizeof(map[i]));
- gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
- dev->otherend_id);
+ gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
+ gnt_refs[i], dev->otherend_id);
handles[i] = INVALID_GRANT_HANDLE;
}
- gnttab_batch_map(map, i);
+ gnttab_batch_map(info->map, i);
for (i = 0; i < nr_grefs; i++) {
- if (map[i].status != GNTST_okay) {
- err = map[i].status;
- xenbus_dev_fatal(dev, map[i].status,
+ if (info->map[i].status != GNTST_okay) {
+ xenbus_dev_fatal(dev, info->map[i].status,
"mapping in shared page %d from domain %d",
gnt_refs[i], dev->otherend_id);
goto fail;
} else
- handles[i] = map[i].handle;
+ handles[i] = info->map[i].handle;
}
- return GNTST_okay;
+ return 0;
fail:
for (i = j = 0; i < nr_grefs; i++) {
if (handles[i] != INVALID_GRANT_HANDLE) {
- memset(&unmap[j], 0, sizeof(unmap[j]));
- gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+ gnttab_set_unmap_op(&info->unmap[j],
+ info->phys_addrs[i],
GNTMAP_host_map, handles[i]);
j++;
}
}
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
BUG();
*leaked = false;
for (i = 0; i < j; i++) {
- if (unmap[i].status != GNTST_okay) {
+ if (info->unmap[i].status != GNTST_okay) {
*leaked = true;
break;
}
}
- return err;
+ return -ENOENT;
}
/**
@@ -566,21 +589,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
return err;
}
-struct map_ring_valloc_hvm
-{
- unsigned int idx;
-
- /* Why do we need two arrays? See comment of __xenbus_map_ring */
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
- unsigned long addrs[XENBUS_MAX_RING_GRANTS];
-};
-
static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
unsigned int goffset,
unsigned int len,
void *data)
{
- struct map_ring_valloc_hvm *info = data;
+ struct map_ring_valloc *info = data;
unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
info->phys_addrs[info->idx] = vaddr;
@@ -589,39 +603,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
info->idx++;
}
-static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
- grant_ref_t *gnt_ref,
- unsigned int nr_grefs,
- void **vaddr)
+static int xenbus_map_ring_hvm(struct xenbus_device *dev,
+ struct map_ring_valloc *info,
+ grant_ref_t *gnt_ref,
+ unsigned int nr_grefs,
+ void **vaddr)
{
- struct xenbus_map_node *node;
+ struct xenbus_map_node *node = info->node;
int err;
void *addr;
bool leaked = false;
- struct map_ring_valloc_hvm info = {
- .idx = 0,
- };
unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- *vaddr = NULL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
if (err)
goto out_err;
gnttab_foreach_grant(node->hvm.pages, nr_grefs,
xenbus_map_ring_setup_grant_hvm,
- &info);
+ info);
err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
- info.phys_addrs, GNTMAP_host_map, &leaked);
+ info, GNTMAP_host_map, &leaked);
node->nr_handles = nr_grefs;
if (err)
@@ -641,11 +644,13 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
spin_unlock(&xenbus_valloc_lock);
*vaddr = addr;
+ info->node = NULL;
+
return 0;
out_xenbus_unmap_ring:
if (!leaked)
- xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
+ xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
else
pr_alert("leaking %p size %u page(s)",
addr, nr_pages);
@@ -653,7 +658,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
if (!leaked)
free_xenballooned_pages(nr_pages, node->hvm.pages);
out_err:
- kfree(node);
return err;
}
@@ -676,40 +680,30 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
#ifdef CONFIG_XEN_PV
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
- grant_ref_t *gnt_refs,
- unsigned int nr_grefs,
- void **vaddr)
+static int xenbus_map_ring_pv(struct xenbus_device *dev,
+ struct map_ring_valloc *info,
+ grant_ref_t *gnt_refs,
+ unsigned int nr_grefs,
+ void **vaddr)
{
- struct xenbus_map_node *node;
+ struct xenbus_map_node *node = info->node;
struct vm_struct *area;
- pte_t *ptes[XENBUS_MAX_RING_GRANTS];
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
int err = GNTST_okay;
int i;
bool leaked;
- *vaddr = NULL;
-
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
+ area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
if (!area) {
- kfree(node);
+ /* info->node is freed by the caller; freeing it here would be a double free */
return -ENOMEM;
}
for (i = 0; i < nr_grefs; i++)
- phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+ info->phys_addrs[i] =
+ arbitrary_virt_to_machine(info->ptes[i]).maddr;
err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
- phys_addrs,
- GNTMAP_host_map | GNTMAP_contains_pte,
+ info, GNTMAP_host_map | GNTMAP_contains_pte,
&leaked);
if (err)
goto failed;
@@ -722,6 +716,8 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
spin_unlock(&xenbus_valloc_lock);
*vaddr = area->addr;
+ info->node = NULL;
+
return 0;
failed:
@@ -730,11 +726,10 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
else
pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
- kfree(node);
return err;
}
-static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
struct xenbus_map_node *node;
struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
@@ -798,12 +793,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
}
static const struct xenbus_ring_ops ring_ops_pv = {
- .map = xenbus_map_ring_valloc_pv,
- .unmap = xenbus_unmap_ring_vfree_pv,
+ .map = xenbus_map_ring_pv,
+ .unmap = xenbus_unmap_ring_pv,
};
#endif
-struct unmap_ring_vfree_hvm
+struct unmap_ring_hvm
{
unsigned int idx;
unsigned long addrs[XENBUS_MAX_RING_GRANTS];
@@ -814,19 +809,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
unsigned int len,
void *data)
{
- struct unmap_ring_vfree_hvm *info = data;
+ struct unmap_ring_hvm *info = data;
info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
info->idx++;
}
-static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
int rv;
struct xenbus_map_node *node;
void *addr;
- struct unmap_ring_vfree_hvm info = {
+ struct unmap_ring_hvm info = {
.idx = 0,
};
unsigned int nr_pages;
@@ -887,8 +882,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
static const struct xenbus_ring_ops ring_ops_hvm = {
- .map = xenbus_map_ring_valloc_hvm,
- .unmap = xenbus_unmap_ring_vfree_hvm,
+ .map = xenbus_map_ring_hvm,
+ .unmap = xenbus_unmap_ring_hvm,
};
void __init xenbus_ring_ops_init(void)
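
The xenbus_client.c rework above is a stack-usage fix: the XENBUS_MAX_RING_GRANTS-sized arrays that each mapping helper used to keep on its stack move into a single kzalloc'd struct map_ring_valloc that xenbus_map_ring_valloc() allocates once and threads through the ring_ops callbacks. A reduced sketch of that shape, with hypothetical names:

#include <linux/slab.h>

#define MAX_GRANTS 16			/* stand-in for XENBUS_MAX_RING_GRANTS */

struct ring_scratch {
	unsigned long addrs[MAX_GRANTS];
	unsigned long phys_addrs[MAX_GRANTS];
	unsigned int idx;
};

/* helpers fill scratch->... instead of declaring the arrays on their stack */
static int do_map(struct ring_scratch *scratch)
{
	scratch->idx = 0;
	return 0;
}

static int map_entry_point(void)
{
	struct ring_scratch *scratch;
	int err;

	scratch = kzalloc(sizeof(*scratch), GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;
	err = do_map(scratch);
	kfree(scratch);
	return err;
}
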
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 3a7648b..82ab6e5 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1196,7 +1196,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
switch (tm->op) {
case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
BUG_ON(tm->slot < n);
- /* Fallthrough */
+ fallthrough;
case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
case MOD_LOG_KEY_REMOVE:
btrfs_set_node_key(eb, &tm->key, tm->slot);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7c6f0bb..b1a1480 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2593,10 +2593,12 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
!extent_buffer_uptodate(tree_root->node)) {
handle_error = true;
- if (IS_ERR(tree_root->node))
+ if (IS_ERR(tree_root->node)) {
ret = PTR_ERR(tree_root->node);
- else if (!extent_buffer_uptodate(tree_root->node))
+ tree_root->node = NULL;
+ } else if (!extent_buffer_uptodate(tree_root->node)) {
ret = -EUCLEAN;
+ }
btrfs_warn(fs_info, "failed to read tree root");
continue;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 68c9605..608f934 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5058,25 +5058,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
int refs;
- /* the ref bit is tricky. We have to make sure it is set
- * if we have the buffer dirty. Otherwise the
- * code to free a buffer can end up dropping a dirty
- * page
+ /*
+ * The TREE_REF bit is first set when the extent_buffer is added
+ * to the radix tree. It is also set again, if it has been cleared, when
+ * a new reference is created by find_extent_buffer.
*
- * Once the ref bit is set, it won't go away while the
- * buffer is dirty or in writeback, and it also won't
- * go away while we have the reference count on the
- * eb bumped.
+ * It is only cleared in two cases: freeing the last non-tree
+ * reference to the extent_buffer when its STALE bit is set or
+ * calling releasepage when the tree reference is the only reference.
*
- * We can't just set the ref bit without bumping the
- * ref on the eb because free_extent_buffer might
- * see the ref bit and try to clear it. If this happens
- * free_extent_buffer might end up dropping our original
- * ref by mistake and freeing the page before we are able
- * to add one more ref.
+ * In both cases, care is taken to ensure that the extent_buffer's
+ * pages are not under io. However, releasepage can be called
+ * concurrently with the creation of new references, which makes those
+ * codepaths prone to races between their calls to check_buffer_tree_ref
+ * and the clearing of TREE_REF in try_release_extent_buffer.
*
- * So bump the ref count first, then set the bit. If someone
- * beat us to it, drop the ref we added.
+ * The actual lifetime of the extent_buffer in the radix tree is
+ * adequately protected by the refcount, but the TREE_REF bit and
+ * its corresponding reference are not. To protect against this
+ * class of races, we call check_buffer_tree_ref from the codepaths
+ * which trigger io after they set eb->io_pages. Note that once io is
+ * initiated, TREE_REF can no longer be cleared, so that is the
+ * moment at which any such race is best fixed.
*/
refs = atomic_read(&eb->refs);
if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
@@ -5527,6 +5530,11 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
atomic_set(&eb->io_pages, num_reads);
+ /*
+ * It is possible for releasepage to clear the TREE_REF bit before we
+ * set io_pages. See check_buffer_tree_ref for a more detailed comment.
+ */
+ check_buffer_tree_ref(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 7887317..af92525 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -509,7 +509,7 @@ static int process_leaf(struct btrfs_root *root,
switch (key.type) {
case BTRFS_EXTENT_ITEM_KEY:
*num_bytes = key.offset;
- /* fall through */
+ fallthrough;
case BTRFS_METADATA_ITEM_KEY:
*bytenr = key.objectid;
ret = process_extent_item(fs_info, path, &key, i,
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 41ee886..c7bd3fd 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -879,8 +879,8 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
return false;
}
global_rsv->reserved -= ticket->bytes;
+ remove_ticket(space_info, ticket);
ticket->bytes = 0;
- list_del_init(&ticket->list);
wake_up(&ticket->wait);
space_info->tickets_id++;
if (global_rsv->reserved < global_rsv->size)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index bc73fd6..c3826ae 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -523,7 +523,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_compress_force:
case Opt_compress_force_type:
compress_force = true;
- /* Fallthrough */
+ fallthrough;
case Opt_compress:
case Opt_compress_type:
saved_compress_type = btrfs_test_opt(info,
@@ -622,7 +622,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
btrfs_set_opt(info->mount_opt, NOSSD);
btrfs_clear_and_info(info, SSD,
"not using ssd optimizations");
- /* Fallthrough */
+ fallthrough;
case Opt_nossd_spread:
btrfs_clear_and_info(info, SSD_SPREAD,
"not using spread ssd allocation scheme");
@@ -793,7 +793,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_recovery:
btrfs_warn(info,
"'recovery' is deprecated, use 'usebackuproot' instead");
- /* fall through */
+ fallthrough;
case Opt_usebackuproot:
btrfs_info(info,
"trying to use backup root at mount time");
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index f067b59..75af233 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -408,7 +408,7 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
return BTRFS_MAP_WRITE;
default:
WARN_ON_ONCE(1);
- /* fall through */
+ fallthrough;
case REQ_OP_READ:
return BTRFS_MAP_READ;
}
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index fc98b97..53588d7 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -399,6 +399,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
if (ses->sign)
seq_puts(m, " signed");
+ seq_printf(m, "\n\tUser: %d Cred User: %d",
+ from_kuid(&init_user_ns, ses->linux_uid),
+ from_kuid(&init_user_ns, ses->cred_uid));
+
if (ses->chan_count > 1) {
seq_printf(m, "\n\n\tExtra Channels: %zu\n",
ses->chan_count-1);
@@ -406,7 +410,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
cifs_dump_channel(m, j, &ses->chans[j]);
}
- seq_puts(m, "\n\tShares:");
+ seq_puts(m, "\n\n\tShares:");
j = 0;
seq_printf(m, "\n\t%d) IPC: ", j);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5fac34f..a61abde 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -5306,9 +5306,15 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
vol_info->nocase = master_tcon->nocase;
vol_info->nohandlecache = master_tcon->nohandlecache;
vol_info->local_lease = master_tcon->local_lease;
+ vol_info->no_lease = master_tcon->no_lease;
+ vol_info->resilient = master_tcon->use_resilient;
+ vol_info->persistent = master_tcon->use_persistent;
+ vol_info->handle_timeout = master_tcon->handle_timeout;
vol_info->no_linux_ext = !master_tcon->unix_ext;
+ vol_info->linux_ext = master_tcon->posix_extensions;
vol_info->sectype = master_tcon->ses->sectype;
vol_info->sign = master_tcon->ses->sign;
+ vol_info->seal = master_tcon->seal;
rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
if (rc) {
@@ -5334,10 +5340,6 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
goto out;
}
- /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
- if (tcon->posix_extensions)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-
if (cap_unix(ses))
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ce95801..49c3ea8 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2044,6 +2044,7 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
FILE_UNIX_BASIC_INFO *info_buf_target;
unsigned int xid;
int rc, tmprc;
+ bool new_target = d_really_is_negative(target_dentry);
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
@@ -2120,8 +2121,13 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
*/
unlink_target:
- /* Try unlinking the target dentry if it's not negative */
- if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
+ /*
+ * If the target dentry was created during the rename, try
+ * unlinking it if it's not negative
+ */
+ if (new_target &&
+ d_really_is_positive(target_dentry) &&
+ (rc == -EACCES || rc == -EEXIST)) {
if (d_is_dir(target_dentry))
tmprc = cifs_rmdir(target_dir, target_dentry);
else
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index d11e310..84433d0 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -523,7 +523,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
const int timeout, const int flags,
unsigned int *instance)
{
- int rc;
+ long rc;
int *credits;
int optype;
long int t;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2299dcc..8545024 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1899,7 +1899,10 @@ bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
static void flush_delete_work(struct gfs2_glock *gl)
{
- flush_delayed_work(&gl->gl_delete);
+ if (cancel_delayed_work(&gl->gl_delete)) {
+ queue_delayed_work(gfs2_delete_workqueue,
+ &gl->gl_delete, 0);
+ }
gfs2_glock_queue_work(gl, 0);
}
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index c848877..de1d5f1 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -531,8 +531,7 @@ static int freeze_go_sync(struct gfs2_glock *gl)
int error = 0;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
- test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+ if (gl->gl_req == LM_ST_EXCLUSIVE && !gfs2_withdrawn(sdp)) {
atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
error = freeze_super(sdp->sd_vfs);
if (error) {
@@ -545,8 +544,11 @@ static int freeze_go_sync(struct gfs2_glock *gl)
gfs2_assert_withdraw(sdp, 0);
}
queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
- gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
- GFS2_LFC_FREEZE_GO_SYNC);
+ if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+ GFS2_LFC_FREEZE_GO_SYNC);
+ else /* read-only mounts */
+ atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
}
return 0;
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 03ab11f..ca2ec024 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -399,7 +399,6 @@ enum {
GIF_QD_LOCKED = 1,
GIF_ALLOC_FAILED = 2,
GIF_SW_PAGED = 3,
- GIF_ORDERED = 4,
GIF_FREE_VFS_INODE = 5,
GIF_GLOP_PENDING = 6,
GIF_DEFERRED_DELETE = 7,
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 370c3a4..6774865 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -207,10 +207,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (no_formal_ino && ip->i_no_formal_ino &&
no_formal_ino != ip->i_no_formal_ino) {
+ error = -ESTALE;
if (inode->i_state & I_NEW)
goto fail;
iput(inode);
- return ERR_PTR(-ESTALE);
+ return ERR_PTR(error);
}
if (inode->i_state & I_NEW)
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 3e47344..a76e55b 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -613,6 +613,12 @@ static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
return 0;
}
+static void __ordered_del_inode(struct gfs2_inode *ip)
+{
+ if (!list_empty(&ip->i_ordered))
+ list_del_init(&ip->i_ordered);
+}
+
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip;
@@ -623,8 +629,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
while (!list_empty(&sdp->sd_log_ordered)) {
ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
if (ip->i_inode.i_mapping->nrpages == 0) {
- test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
- list_del(&ip->i_ordered);
+ __ordered_del_inode(ip);
continue;
}
list_move(&ip->i_ordered, &written);
@@ -643,8 +648,7 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
spin_lock(&sdp->sd_ordered_lock);
while (!list_empty(&sdp->sd_log_ordered)) {
ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
- list_del(&ip->i_ordered);
- WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
+ __ordered_del_inode(ip);
if (ip->i_inode.i_mapping->nrpages == 0)
continue;
spin_unlock(&sdp->sd_ordered_lock);
@@ -659,8 +663,7 @@ void gfs2_ordered_del_inode(struct gfs2_inode *ip)
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
spin_lock(&sdp->sd_ordered_lock);
- if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
- list_del(&ip->i_ordered);
+ __ordered_del_inode(ip);
spin_unlock(&sdp->sd_ordered_lock);
}
@@ -1002,6 +1005,16 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
out:
if (gfs2_withdrawn(sdp)) {
+ /*
+ * If the tr_list is empty, we're withdrawing during a log
+ * flush that targets a transaction, but the transaction was
+ * never queued onto any of the ail lists. Here we add it to
+ * ail1 just so that ail_drain() will find and free it.
+ */
+ spin_lock(&sdp->sd_ail_lock);
+ if (tr && list_empty(&tr->tr_list))
+ list_add(&tr->tr_list, &sdp->sd_ail1_list);
+ spin_unlock(&sdp->sd_ail_lock);
ail_drain(sdp); /* frees all transactions */
tr = NULL;
}
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index c1cd6ae1..8965c75 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -53,9 +53,9 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
if (gfs2_is_jdata(ip) || !gfs2_is_ordered(sdp))
return;
- if (!test_bit(GIF_ORDERED, &ip->i_flags)) {
+ if (list_empty(&ip->i_ordered)) {
spin_lock(&sdp->sd_ordered_lock);
- if (!test_and_set_bit(GIF_ORDERED, &ip->i_flags))
+ if (list_empty(&ip->i_ordered))
list_add(&ip->i_ordered, &sdp->sd_log_ordered);
spin_unlock(&sdp->sd_ordered_lock);
}
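
The gfs2 ordered-write hunks replace the GIF_ORDERED flag bit with a membership test on the list_head itself: as long as the entry is initialised with INIT_LIST_HEAD() and always removed with list_del_init(), list_empty() answers "is it queued?" directly, so the flag and the list can never disagree. A minimal sketch of the idiom with a hypothetical structure:

#include <linux/list.h>
#include <linux/spinlock.h>

struct tracked {
	struct list_head node;
};

static LIST_HEAD(queue);
static DEFINE_SPINLOCK(queue_lock);

static void tracked_init(struct tracked *t)
{
	INIT_LIST_HEAD(&t->node);		/* empty == not queued */
}

static void tracked_queue(struct tracked *t)
{
	spin_lock(&queue_lock);
	if (list_empty(&t->node))		/* not queued yet */
		list_add(&t->node, &queue);
	spin_unlock(&queue_lock);
}

static void tracked_dequeue(struct tracked *t)
{
	spin_lock(&queue_lock);
	if (!list_empty(&t->node))
		list_del_init(&t->node);	/* re-arms the list_empty() test */
	spin_unlock(&queue_lock);
}
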
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 733470c..c7393ee 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -39,6 +39,7 @@ static void gfs2_init_inode_once(void *foo)
atomic_set(&ip->i_sizehint, 0);
init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
+ INIT_LIST_HEAD(&ip->i_ordered);
ip->i_qadata = NULL;
gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
memset(&ip->i_res, 0, sizeof(ip->i_res));
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 094f5fe7..6d18d2c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1136,7 +1136,18 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
goto fail_per_node;
}
- if (!sb_rdonly(sb)) {
+ if (sb_rdonly(sb)) {
+ struct gfs2_holder freeze_gh;
+
+ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+ LM_FLAG_NOEXP | GL_EXACT,
+ &freeze_gh);
+ if (error) {
+ fs_err(sdp, "can't make FS RO: %d\n", error);
+ goto fail_per_node;
+ }
+ gfs2_glock_dq_uninit(&freeze_gh);
+ } else {
error = gfs2_make_fs_rw(sdp);
if (error) {
fs_err(sdp, "can't make FS RW: %d\n", error);
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 96c345f..390ea79 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -364,8 +364,8 @@ void gfs2_recover_func(struct work_struct *work)
/* Acquire a shared hold on the freeze lock */
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | LM_FLAG_PRIORITY,
- &thaw_gh);
+ LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
+ GL_EXACT, &thaw_gh);
if (error)
goto fail_gunlock_ji;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 32d8d26..47d0ae1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -167,7 +167,8 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
if (error)
return error;
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
+ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+ LM_FLAG_NOEXP | GL_EXACT,
&freeze_gh);
if (error)
goto fail_threads;
@@ -203,7 +204,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
return 0;
fail:
- freeze_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_uninit(&freeze_gh);
fail_threads:
if (sdp->sd_quotad_process)
@@ -430,7 +430,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
}
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
- GL_NOCACHE, &sdp->sd_freeze_gh);
+ LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
if (error)
goto out;
@@ -613,13 +613,15 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
!gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
if (!log_write_allowed) {
error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
- LM_ST_SHARED, GL_NOCACHE |
- LM_FLAG_TRY, &freeze_gh);
+ LM_ST_SHARED, LM_FLAG_TRY |
+ LM_FLAG_NOEXP | GL_EXACT,
+ &freeze_gh);
if (error == GLR_TRYFAILED)
error = 0;
} else {
error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
- LM_ST_SHARED, GL_NOCACHE,
+ LM_ST_SHARED,
+ LM_FLAG_NOEXP | GL_EXACT,
&freeze_gh);
if (error && !gfs2_withdrawn(sdp))
return error;
@@ -761,8 +763,8 @@ void gfs2_freeze_func(struct work_struct *work)
struct super_block *sb = sdp->sd_vfs;
atomic_inc(&sb->s_active);
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
- &freeze_gh);
+ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+ LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
if (error) {
fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
gfs2_assert_withdraw(sdp, 0);
@@ -774,8 +776,6 @@ void gfs2_freeze_func(struct work_struct *work)
error);
gfs2_assert_withdraw(sdp, 0);
}
- if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
- freeze_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_uninit(&freeze_gh);
}
deactivate_super(sb);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 700644a0..d37d7ea 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4072,14 +4072,22 @@ struct io_poll_table {
int error;
};
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
- int notify)
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
{
struct task_struct *tsk = req->task;
- int ret;
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret, notify = TWA_RESUME;
- if (req->ctx->flags & IORING_SETUP_SQPOLL)
+ /*
+ * SQPOLL kernel thread doesn't need notification, just a wakeup.
+ * If we're not using an eventfd, then TWA_RESUME is always fine,
+ * as we won't have dependencies between request completions and
+ * other kernel wait conditions.
+ */
+ if (ctx->flags & IORING_SETUP_SQPOLL)
notify = 0;
+ else if (ctx->cq_ev_fd)
+ notify = TWA_SIGNAL;
ret = task_work_add(tsk, cb, notify);
if (!ret)
@@ -4110,7 +4118,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
* of executing it. We can't safely execute it anyway, as we may not
* have the state needed for it.
*/
- ret = io_req_task_work_add(req, &req->task_work, TWA_SIGNAL);
+ ret = io_req_task_work_add(req, &req->task_work);
if (unlikely(ret)) {
WRITE_ONCE(poll->canceled, true);
tsk = io_wq_get_task(req->ctx->io_wq);
@@ -6201,7 +6209,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
if (current->task_works)
task_work_run();
if (signal_pending(current)) {
- ret = -ERESTARTSYS;
+ if (current->jobctl & JOBCTL_TASK_WORK) {
+ spin_lock_irq(¤t->sighand->siglock);
+ current->jobctl &= ~JOBCTL_TASK_WORK;
+ recalc_sigpending();
+ spin_unlock_irq(¤t->sighand->siglock);
+ continue;
+ }
+ ret = -EINTR;
break;
}
if (io_should_wake(&iowq, false))
@@ -6210,7 +6225,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
} while (1);
finish_wait(&ctx->wait, &iowq.wq);
- restore_saved_sigmask_unless(ret == -ERESTARTSYS);
+ restore_saved_sigmask_unless(ret == -EINTR);
return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 42c5128..6c1166c 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -566,8 +566,9 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
goto out;
/* don't even try if the size is too large */
- if (count > KMALLOC_MAX_SIZE)
- return -ENOMEM;
+ error = -ENOMEM;
+ if (count >= KMALLOC_MAX_SIZE)
+ goto out;
if (write) {
kbuf = memdup_user_nul(ubuf, count);
@@ -576,7 +577,6 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
goto out;
}
} else {
- error = -ENOMEM;
kbuf = kzalloc(count, GFP_KERNEL);
if (!kbuf)
goto out;
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index b43f0e8..9ed9036 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -671,7 +671,8 @@ xlog_cil_push_work(
/*
* Wake up any background push waiters now that this context is being pushed.
*/
- wake_up_all(&ctx->push_wait);
+ if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
+ wake_up_all(&cil->xc_push_wait);
/*
* Check if we've anything to push. If there is nothing, then we don't
@@ -743,13 +744,12 @@ xlog_cil_push_work(
/*
* initialise the new context and attach it to the CIL. Then attach
- * the current context to the CIL committing lsit so it can be found
+ * the current context to the CIL committing list so it can be found
* during log forces to extract the commit lsn of the sequence that
* needs to be forced.
*/
INIT_LIST_HEAD(&new_ctx->committing);
INIT_LIST_HEAD(&new_ctx->busy_extents);
- init_waitqueue_head(&new_ctx->push_wait);
new_ctx->sequence = ctx->sequence + 1;
new_ctx->cil = cil;
cil->xc_ctx = new_ctx;
@@ -937,7 +937,7 @@ xlog_cil_push_background(
if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
ASSERT(cil->xc_ctx->space_used < log->l_logsize);
- xlog_wait(&cil->xc_ctx->push_wait, &cil->xc_push_lock);
+ xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
return;
}
@@ -1216,12 +1216,12 @@ xlog_cil_init(
INIT_LIST_HEAD(&cil->xc_committing);
spin_lock_init(&cil->xc_cil_lock);
spin_lock_init(&cil->xc_push_lock);
+ init_waitqueue_head(&cil->xc_push_wait);
init_rwsem(&cil->xc_ctx_lock);
init_waitqueue_head(&cil->xc_commit_wait);
INIT_LIST_HEAD(&ctx->committing);
INIT_LIST_HEAD(&ctx->busy_extents);
- init_waitqueue_head(&ctx->push_wait);
ctx->sequence = 1;
ctx->cil = cil;
cil->xc_ctx = ctx;
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index ec22c7a..75a6287 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -240,7 +240,6 @@ struct xfs_cil_ctx {
struct xfs_log_vec *lv_chain; /* logvecs being pushed */
struct list_head iclog_entry;
struct list_head committing; /* ctx committing list */
- wait_queue_head_t push_wait; /* background push throttle */
struct work_struct discard_endio_work;
};
@@ -274,6 +273,7 @@ struct xfs_cil {
wait_queue_head_t xc_commit_wait;
xfs_lsn_t xc_current_sequence;
struct work_struct xc_push_work;
+ wait_queue_head_t xc_push_wait; /* background push throttle */
} ____cacheline_aligned_in_smp;
/*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c79d833..34c1c4f 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2169,12 +2169,11 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
*/
static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
- struct pci_dev *bridge = pci_upstream_bridge(dev);
-
- while (bridge) {
- if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
- return bridge;
- bridge = pci_upstream_bridge(bridge);
+ while (dev) {
+ if (pci_is_pcie(dev) &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ dev = pci_upstream_bridge(dev);
}
return NULL;
diff --git a/init/Kconfig b/init/Kconfig
index a46aa8f..0498af5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -49,13 +49,13 @@
config CC_CAN_LINK
bool
- default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m64-flag)) if 64BIT
- default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m32-flag))
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag))
config CC_CAN_LINK_STATIC
bool
- default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m64-flag)) if 64BIT
- default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m32-flag))
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag) -static) if 64BIT
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag) -static)
config CC_HAS_ASM_GOTO
def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
diff --git a/kernel/module.c b/kernel/module.c
index 0c6573b..bee1c25 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2785,7 +2785,7 @@ void * __weak module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
- NUMA_NO_NODE, __func__);
+ NUMA_NO_NODE, __builtin_return_address(0));
}
bool __weak module_init_section(const char *name)
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 16dd1e6b..9eb39c2 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -723,7 +723,7 @@ kfree_perf_init(void)
schedule_timeout_uninterruptible(1);
}
- pr_alert("kfree object size=%lu\n", kfree_mult * sizeof(struct kfree_obj));
+ pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));
kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
GFP_KERNEL);
diff --git a/mm/cma.c b/mm/cma.c
index 0463ad2..26ecff8 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -339,13 +339,13 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
*/
if (base < highmem_start && limit > highmem_start) {
addr = memblock_alloc_range_nid(size, alignment,
- highmem_start, limit, nid, false);
+ highmem_start, limit, nid, true);
limit = highmem_start;
}
if (!addr) {
addr = memblock_alloc_range_nid(size, alignment, base,
- limit, nid, false);
+ limit, nid, true);
if (!addr) {
ret = -ENOMEM;
goto err;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 57ece74..fab4485 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1593,7 +1593,7 @@ static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
/* Use first found vma */
pgoff_start = page_to_pgoff(hpage);
- pgoff_end = pgoff_start + hpage_nr_pages(hpage) - 1;
+ pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 48eb0f1..e028b87c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7832,7 +7832,7 @@ void setup_per_zone_wmarks(void)
* Initialise min_free_kbytes.
*
* For small machines we want it small (128k min). For large machines
- * we want it large (64MB max). But it is not linear, because network
+ * we want it large (256MB max). But it is not linear, because network
* bandwidth does not increase linearly with machine size. We use
*
* min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
diff --git a/samples/vfs/test-statx.c b/samples/vfs/test-statx.c
index 76c577e..49c7a46 100644
--- a/samples/vfs/test-statx.c
+++ b/samples/vfs/test-statx.c
@@ -23,6 +23,8 @@
#include <linux/fcntl.h>
#define statx foo
#define statx_timestamp foo_timestamp
+struct statx;
+struct statx_timestamp;
#include <sys/stat.h>
#undef statx
#undef statx_timestamp
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
index ce0b99f..ae19fb0 100644
--- a/scripts/gcc-plugins/Kconfig
+++ b/scripts/gcc-plugins/Kconfig
@@ -78,7 +78,7 @@
source tree isn't cleaned after kernel installation).
The seed used for compilation is located at
- scripts/gcc-plgins/randomize_layout_seed.h. It remains after
+ scripts/gcc-plugins/randomize_layout_seed.h. It remains after
a make clean to allow for external modules to be compiled with
the existing seed and will be removed by a make mrproper or
make distclean.
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index c0ac8f7..4a61612 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -4,27 +4,19 @@
* Copyright (C) 2015 Boris Barbulovski <[email protected]>
*/
-#include <qglobal.h>
-
-#include <QMainWindow>
-#include <QList>
-#include <qtextbrowser.h>
#include <QAction>
+#include <QApplication>
+#include <QCloseEvent>
+#include <QDebug>
+#include <QDesktopWidget>
#include <QFileDialog>
+#include <QLabel>
+#include <QLayout>
+#include <QList>
#include <QMenu>
-
-#include <qapplication.h>
-#include <qdesktopwidget.h>
-#include <qtoolbar.h>
-#include <qlayout.h>
-#include <qsplitter.h>
-#include <qlineedit.h>
-#include <qlabel.h>
-#include <qpushbutton.h>
-#include <qmenubar.h>
-#include <qmessagebox.h>
-#include <qregexp.h>
-#include <qevent.h>
+#include <QMenuBar>
+#include <QMessageBox>
+#include <QToolBar>
#include <stdlib.h>
@@ -445,9 +437,10 @@
if (rootEntry != &rootmenu && (mode == singleMode ||
(mode == symbolMode && rootEntry->parent != &rootmenu))) {
item = (ConfigItem *)topLevelItem(0);
- if (!item)
+ if (!item && mode != symbolMode) {
item = new ConfigItem(this, 0, true);
- last = item;
+ last = item;
+ }
}
if ((mode == singleMode || (mode == symbolMode && !(rootEntry->flags & MENU_ROOT))) &&
rootEntry->sym && rootEntry->prompt) {
@@ -545,7 +538,7 @@
rootEntry = menu;
updateListAll();
if (currentItem()) {
- currentItem()->setSelected(hasFocus());
+ setSelected(currentItem(), hasFocus());
scrollToItem(currentItem());
}
}
@@ -873,7 +866,7 @@
ConfigItem* item = (ConfigItem *)currentItem();
if (item) {
- item->setSelected(true);
+ setSelected(item, true);
menu = item->menu;
}
emit gotFocus(menu);
@@ -1021,7 +1014,7 @@
: Parent(parent), sym(0), _menu(0)
{
setObjectName(name);
-
+ setOpenLinks(false);
if (!objectName().isEmpty()) {
configSettings->beginGroup(objectName());
@@ -1094,7 +1087,7 @@
if (sym->name) {
head += " (";
if (showDebug())
- head += QString().sprintf("<a href=\"s%p\">", sym);
+ head += QString().sprintf("<a href=\"s%s\">", sym->name);
head += print_filter(sym->name);
if (showDebug())
head += "</a>";
@@ -1103,7 +1096,7 @@
} else if (sym->name) {
head += "<big><b>";
if (showDebug())
- head += QString().sprintf("<a href=\"s%p\">", sym);
+ head += QString().sprintf("<a href=\"s%s\">", sym->name);
head += print_filter(sym->name);
if (showDebug())
head += "</a>";
@@ -1154,13 +1147,16 @@
switch (prop->type) {
case P_PROMPT:
case P_MENU:
- debug += QString().sprintf("prompt: <a href=\"m%p\">", prop->menu);
+ debug += QString().sprintf("prompt: <a href=\"m%s\">", sym->name);
debug += print_filter(prop->text);
debug += "</a><br>";
break;
case P_DEFAULT:
case P_SELECT:
case P_RANGE:
+ case P_COMMENT:
+ case P_IMPLY:
+ case P_SYMBOL:
debug += prop_get_type_name(prop->type);
debug += ": ";
expr_print(prop->expr, expr_print_help, &debug, E_NONE);
@@ -1226,13 +1222,62 @@
QString str2 = print_filter(str);
if (sym && sym->name && !(sym->flags & SYMBOL_CONST)) {
- *text += QString().sprintf("<a href=\"s%p\">", sym);
+ *text += QString().sprintf("<a href=\"s%s\">", sym->name);
*text += str2;
*text += "</a>";
} else
*text += str2;
}
+void ConfigInfoView::clicked(const QUrl &url)
+{
+ QByteArray str = url.toEncoded();
+ const std::size_t count = str.size();
+ char *data = new char[count + 2]; /* room for the appended '$' and the NUL */
+ struct symbol **result;
+ struct menu *m = NULL;
+
+ if (count < 1) {
+ qInfo() << "Clicked link is empty";
+ delete[] data;
+ return;
+ }
+
+ memcpy(data, str.constData(), count);
+ data[count] = '\0';
+
+ /* Seek for exact match */
+ data[0] = '^';
+ strcat(data, "$");
+ result = sym_re_search(data);
+ if (!result) {
+ qInfo() << "Clicked symbol is invalid:" << data;
+ delete[] data;
+ return;
+ }
+
+ sym = *result;
+
+ /* Seek for the menu which holds the symbol */
+ for (struct property *prop = sym->prop; prop; prop = prop->next) {
+ if (prop->type != P_PROMPT && prop->type != P_MENU)
+ continue;
+ m = prop->menu;
+ break;
+ }
+
+ if (!m) {
+ /* Symbol is not visible as a menu */
+ symbolInfo();
+ emit showDebugChanged(true);
+ } else {
+ emit menuSelected(m);
+ }
+
+ free(result);
+ delete[] data;
+}
+
QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
{
QMenu* popup = Parent::createStandardContextMenu(pos);
@@ -1402,18 +1447,22 @@
addToolBar(toolBar);
backAction = new QAction(QPixmap(xpm_back), "Back", this);
- connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
- backAction->setEnabled(false);
+ connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
+
QAction *quitAction = new QAction("&Quit", this);
quitAction->setShortcut(Qt::CTRL + Qt::Key_Q);
- connect(quitAction, SIGNAL(triggered(bool)), SLOT(close()));
+ connect(quitAction, SIGNAL(triggered(bool)), SLOT(close()));
+
QAction *loadAction = new QAction(QPixmap(xpm_load), "&Load", this);
loadAction->setShortcut(Qt::CTRL + Qt::Key_L);
- connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig()));
+ connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig()));
+
saveAction = new QAction(QPixmap(xpm_save), "&Save", this);
saveAction->setShortcut(Qt::CTRL + Qt::Key_S);
- connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig()));
+ connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig()));
+
conf_set_changed_callback(conf_changed);
+
// Set saveAction's initial state
conf_changed();
configname = xstrdup(conf_get_configname());
@@ -1506,6 +1555,9 @@
helpMenu->addAction(showIntroAction);
helpMenu->addAction(showAboutAction);
+ connect (helpText, SIGNAL (anchorClicked (const QUrl &)),
+ helpText, SLOT (clicked (const QUrl &)) );
+
connect(configList, SIGNAL(menuChanged(struct menu *)),
helpText, SLOT(setInfo(struct menu *)));
connect(configList, SIGNAL(menuSelected(struct menu *)),
@@ -1611,21 +1663,11 @@
void ConfigMainWindow::changeItens(struct menu *menu)
{
configList->setRootMenu(menu);
-
- if (configList->rootEntry->parent == &rootmenu)
- backAction->setEnabled(false);
- else
- backAction->setEnabled(true);
}
void ConfigMainWindow::changeMenu(struct menu *menu)
{
menuList->setRootMenu(menu);
-
- if (menuList->rootEntry->parent == &rootmenu)
- backAction->setEnabled(false);
- else
- backAction->setEnabled(true);
}
void ConfigMainWindow::setMenuLink(struct menu *menu)
@@ -1645,22 +1687,26 @@
return;
list->setRootMenu(parent);
break;
- case symbolMode:
+ case menuMode:
if (menu->flags & MENU_ROOT) {
- configList->setRootMenu(menu);
+ menuList->setRootMenu(menu);
configList->clearSelection();
- list = menuList;
- } else {
list = configList;
+ } else {
parent = menu_get_parent_menu(menu->parent);
if (!parent)
return;
- item = menuList->findConfigItem(parent);
+
+ /* Select the config view */
+ item = configList->findConfigItem(parent);
if (item) {
- item->setSelected(true);
- menuList->scrollToItem(item);
+ configList->setSelected(item, true);
+ configList->scrollToItem(item);
}
- list->setRootMenu(parent);
+
+ menuList->setRootMenu(parent);
+ menuList->clearSelection();
+ list = menuList;
}
break;
case fullMode:
@@ -1673,9 +1719,10 @@
if (list) {
item = list->findConfigItem(menu);
if (item) {
- item->setSelected(true);
+ list->setSelected(item, true);
list->scrollToItem(item);
list->setFocus();
+ helpText->setInfo(menu);
}
}
}
@@ -1688,25 +1735,11 @@
void ConfigMainWindow::goBack(void)
{
- ConfigItem* item, *oldSelection;
-
- configList->setParentMenu();
+qInfo() << __FUNCTION__;
if (configList->rootEntry == &rootmenu)
- backAction->setEnabled(false);
-
- if (menuList->selectedItems().count() == 0)
return;
- item = (ConfigItem*)menuList->selectedItems().first();
- oldSelection = item;
- while (item) {
- if (item->menu == configList->rootEntry) {
- oldSelection->setSelected(false);
- item->setSelected(true);
- break;
- }
- item = (ConfigItem*)item->parent();
- }
+ configList->setParentMenu();
}
void ConfigMainWindow::showSingleView(void)
@@ -1718,6 +1751,8 @@
fullViewAction->setEnabled(true);
fullViewAction->setChecked(false);
+ backAction->setEnabled(true);
+
menuView->hide();
menuList->setRootMenu(0);
configList->mode = singleMode;
@@ -1737,6 +1772,8 @@
fullViewAction->setEnabled(true);
fullViewAction->setChecked(false);
+ backAction->setEnabled(false);
+
configList->mode = menuMode;
if (configList->rootEntry == &rootmenu)
configList->updateListAll();
@@ -1760,6 +1797,8 @@
fullViewAction->setEnabled(false);
fullViewAction->setChecked(true);
+ backAction->setEnabled(false);
+
menuView->hide();
menuList->setRootMenu(0);
configList->mode = fullMode;
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h
index c879d79..fb9e972 100644
--- a/scripts/kconfig/qconf.h
+++ b/scripts/kconfig/qconf.h
@@ -3,17 +3,17 @@
* Copyright (C) 2002 Roman Zippel <[email protected]>
*/
-#include <QTextBrowser>
-#include <QTreeWidget>
-#include <QMainWindow>
-#include <QHeaderView>
-#include <qsettings.h>
-#include <QPushButton>
-#include <QSettings>
-#include <QLineEdit>
-#include <QSplitter>
#include <QCheckBox>
#include <QDialog>
+#include <QHeaderView>
+#include <QLineEdit>
+#include <QMainWindow>
+#include <QPushButton>
+#include <QSettings>
+#include <QSplitter>
+#include <QTextBrowser>
+#include <QTreeWidget>
+
#include "expr.h"
class ConfigView;
@@ -45,11 +45,17 @@ class ConfigList : public QTreeWidget {
public:
ConfigList(ConfigView* p, const char *name = 0);
void reinit(void);
+ ConfigItem* findConfigItem(struct menu *);
ConfigView* parent(void) const
{
return (ConfigView*)Parent::parent();
}
- ConfigItem* findConfigItem(struct menu *);
+ void setSelected(QTreeWidgetItem *item, bool enable) {
+ for (int i = 0; i < selectedItems().size(); i++)
+ selectedItems().at(i)->setSelected(false);
+
+ item->setSelected(enable);
+ }
protected:
void keyPressEvent(QKeyEvent *e);
@@ -250,6 +256,7 @@ public slots:
void setInfo(struct menu *menu);
void saveSettings(void);
void setShowDebug(bool);
+ void clicked (const QUrl &url);
signals:
void showDebugChanged(bool);
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 5f16821..d2796ea 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -70,10 +70,10 @@
EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64)
-$(BINARIES_32): $(OUTPUT)/%_32: %.c
+$(BINARIES_32): $(OUTPUT)/%_32: %.c helpers.h
$(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
-$(BINARIES_64): $(OUTPUT)/%_64: %.c
+$(BINARIES_64): $(OUTPUT)/%_64: %.c helpers.h
$(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
# x86_64 users should be encouraged to install 32-bit libraries
diff --git a/tools/testing/selftests/x86/helpers.h b/tools/testing/selftests/x86/helpers.h
new file mode 100644
index 0000000..f5ff2a2
--- /dev/null
+++ b/tools/testing/selftests/x86/helpers.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __SELFTESTS_X86_HELPERS_H
+#define __SELFTESTS_X86_HELPERS_H
+
+#include <asm/processor-flags.h>
+
+static inline unsigned long get_eflags(void)
+{
+ unsigned long eflags;
+
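+ /* On x86-64, step over the 128-byte red zone so the push below cannot clobber it */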
+ asm volatile (
+#ifdef __x86_64__
+ "subq $128, %%rsp\n\t"
+ "pushfq\n\t"
+ "popq %0\n\t"
+ "addq $128, %%rsp"
+#else
+ "pushfl\n\t"
+ "popl %0"
+#endif
+ : "=r" (eflags) :: "memory");
+
+ return eflags;
+}
+
+static inline void set_eflags(unsigned long eflags)
+{
+ asm volatile (
+#ifdef __x86_64__
+ "subq $128, %%rsp\n\t"
+ "pushq %0\n\t"
+ "popfq\n\t"
+ "addq $128, %%rsp"
+#else
+ "pushl %0\n\t"
+ "popfl"
+#endif
+ :: "r" (eflags) : "flags", "memory");
+}
+
+#endif /* __SELFTESTS_X86_HELPERS_H */
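
The new helpers.h is shared by the three selftests below. A trivial usage sketch, built stand-alone in the same way as the selftests and assuming nothing beyond the header itself: read EFLAGS, test one bit, and write the value back unchanged.

#include <stdio.h>
#include "helpers.h"

int main(void)
{
	unsigned long flags = get_eflags();

	/* IF is always set in user space; this just shows the round trip */
	printf("EFLAGS = %#lx, IF = %d\n", flags, !!(flags & X86_EFLAGS_IF));
	set_eflags(flags);		/* write back unchanged */
	return 0;
}
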
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index 1063328..120ac74 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -31,6 +31,8 @@
#include <sys/ptrace.h>
#include <sys/user.h>
+#include "helpers.h"
+
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
@@ -67,21 +69,6 @@ static unsigned char altstack_data[SIGSTKSZ];
# define INT80_CLOBBERS
#endif
-static unsigned long get_eflags(void)
-{
- unsigned long eflags;
- asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
- return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
- asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
- : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
diff --git a/tools/testing/selftests/x86/syscall_arg_fault.c b/tools/testing/selftests/x86/syscall_arg_fault.c
index bc0ecc2..5b7abeb 100644
--- a/tools/testing/selftests/x86/syscall_arg_fault.c
+++ b/tools/testing/selftests/x86/syscall_arg_fault.c
@@ -15,30 +15,11 @@
#include <setjmp.h>
#include <errno.h>
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
+#include "helpers.h"
/* Our sigaltstack scratch space. */
static unsigned char altstack_data[SIGSTKSZ];
-static unsigned long get_eflags(void)
-{
- unsigned long eflags;
- asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
- return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
- asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
- : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
index 02309a1..a108b80 100644
--- a/tools/testing/selftests/x86/syscall_nt.c
+++ b/tools/testing/selftests/x86/syscall_nt.c
@@ -13,29 +13,11 @@
#include <signal.h>
#include <err.h>
#include <sys/syscall.h>
-#include <asm/processor-flags.h>
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
+#include "helpers.h"
static unsigned int nerrs;
-static unsigned long get_eflags(void)
-{
- unsigned long eflags;
- asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
- return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
- asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
- : : "rm" (eflags) : "flags");
-}
-
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
@@ -59,6 +41,7 @@ static void do_it(unsigned long extraflags)
set_eflags(get_eflags() | extraflags);
syscall(SYS_getpid);
flags = get_eflags();
+ set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
if ((flags & extraflags) == extraflags) {
printf("[OK]\tThe syscall worked and flags are still set\n");
} else {
@@ -73,6 +56,12 @@ int main(void)
printf("[RUN]\tSet NT and issue a syscall\n");
do_it(X86_EFLAGS_NT);
+ printf("[RUN]\tSet AC and issue a syscall\n");
+ do_it(X86_EFLAGS_AC);
+
+ printf("[RUN]\tSet NT|AC and issue a syscall\n");
+ do_it(X86_EFLAGS_NT | X86_EFLAGS_AC);
+
/*
* Now try it again with TF set -- TF forces returns via IRET in all
* cases except non-ptregs-using 64-bit full fast path syscalls.
@@ -80,8 +69,28 @@ int main(void)
sethandler(SIGTRAP, sigtrap, 0);
+ printf("[RUN]\tSet TF and issue a syscall\n");
+ do_it(X86_EFLAGS_TF);
+
printf("[RUN]\tSet NT|TF and issue a syscall\n");
do_it(X86_EFLAGS_NT | X86_EFLAGS_TF);
+ printf("[RUN]\tSet AC|TF and issue a syscall\n");
+ do_it(X86_EFLAGS_AC | X86_EFLAGS_TF);
+
+ printf("[RUN]\tSet NT|AC|TF and issue a syscall\n");
+ do_it(X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_TF);
+
+ /*
+ * Now try DF. This is evil and it's plausible that we will crash
+ * glibc, but glibc would have to do something rather surprising
+ * for this to happen.
+ */
+ printf("[RUN]\tSet DF and issue a syscall\n");
+ do_it(X86_EFLAGS_DF);
+
+ printf("[RUN]\tSet TF|DF and issue a syscall\n");
+ do_it(X86_EFLAGS_TF | X86_EFLAGS_DF);
+
return nerrs == 0 ? 0 : 1;
}
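
The syscall_nt.c change does two things: do_it() now resets EFLAGS to X86_EFLAGS_IF | X86_EFLAGS_FIXED once it has sampled the post-syscall flags, so a bit left behind by one combination (NT, AC, TF or DF) cannot leak into the next run, and main() exercises many more combinations, including the deliberately nasty DF cases. Bit 1 (X86_EFLAGS_FIXED) always reads as one, and IF cannot be changed from CPL 3 anyway, so the reset effectively clears everything else. Below is a standalone sketch of that reset pattern, built around a hypothetical run_with_flags() wrapper that is not part of the test:

#include <stdio.h>

#include "helpers.h"

static unsigned long seen;

static void body(void)
{
	/* No library calls here: the extra flag (e.g. AC) is still live. */
	seen = get_eflags();
}

static void run_with_flags(unsigned long extra, void (*fn)(void))
{
	set_eflags(get_eflags() | extra);
	fn();
	/*
	 * Back to a known baseline: the reserved bit stays set, IF is left
	 * alone by the CPU at CPL 3, and NT/AC/TF/DF/... are cleared again
	 * so one run's leftovers cannot affect the next.
	 */
	set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
}

int main(void)
{
	run_with_flags(X86_EFLAGS_AC, body);
	printf("EFLAGS while AC was set: 0x%lx\n", seen);
	return 0;
}
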
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index a4f4d4c..c41f24b 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -20,6 +20,8 @@
#include <setjmp.h>
#include <sys/uio.h>
+#include "helpers.h"
+
#ifdef __x86_64__
# define VSYS(x) (x)
#else
@@ -493,21 +495,8 @@ static int test_process_vm_readv(void)
}
#ifdef __x86_64__
-#define X86_EFLAGS_TF (1UL << 8)
static volatile sig_atomic_t num_vsyscall_traps;
-static unsigned long get_eflags(void)
-{
- unsigned long eflags;
- asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
- return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
- asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
-}
-
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t *)ctx_void;
diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c
index 0075ccd..4c311e1a 100644
--- a/tools/testing/selftests/x86/unwind_vdso.c
+++ b/tools/testing/selftests/x86/unwind_vdso.c
@@ -11,6 +11,8 @@
#include <features.h>
#include <stdio.h>
+#include "helpers.h"
+
#if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16
int main()
@@ -53,27 +55,6 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
err(1, "sigaction");
}
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
-
-static unsigned long get_eflags(void)
-{
- unsigned long eflags;
- asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
- return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
- asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
- : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
static volatile sig_atomic_t nerrs;
static unsigned long sysinfo;
static bool got_sysinfo = false;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a852af5..0a68c9d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3350,7 +3350,8 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
if (kvm_sigmask.len != sizeof(compat_sigset_t))
goto out;
r = -EFAULT;
- if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
+ if (get_compat_sigset(&sigset,
+ (compat_sigset_t __user *)sigmask_arg->sigset))
goto out;
r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
} else
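
The kvm_main.c hunk is a type-annotation fix only: get_compat_sigset() takes a compat_sigset_t __user pointer, and casting the user-supplied sigset through a plain void * dropped the __user address-space tag, which sparse reports; the generated code is unchanged. The following self-contained sketch shows the idea, using a simplified __user definition that mirrors the kernel's only in spirit:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

struct mask_arg {
	const unsigned char __user *sigset;	/* points into user memory */
};

/*
 * Fixed pattern: the cast changes the pointee type but keeps the __user
 * tag, so sparse still knows the result must go through copy_from_user()
 * style accessors. Casting via plain void * would silently strip the tag.
 */
static const unsigned long __user *sigset_words(struct mask_arg *arg)
{
	return (const unsigned long __user *)arg->sigset;
}

int main(void)
{
	struct mask_arg arg = { 0 };

	return sigset_words(&arg) ? 1 : 0;
}
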