
Commit ea0c82f

Merge remote-tracking branch 'stable/linux-6.13.y' into v6.13+
2 parents: 9ca2004 + 648e04a

409 files changed: 5023 additions & 2235 deletions


Documentation/admin-guide/sysctl/kernel.rst

Lines changed: 11 additions & 0 deletions
@@ -212,6 +212,17 @@ pid>/``).
 This value defaults to 0.
 
 
+core_sort_vma
+=============
+
+The default coredump writes VMAs in address order. By setting
+``core_sort_vma`` to 1, VMAs will be written from smallest size
+to largest size. This is known to break at least elfutils, but
+can be handy when dealing with very large (and truncated)
+coredumps where the more useful debugging details are included
+in the smaller VMAs.
+
+
 core_uses_pid
 =============
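As a usage sketch (not part of the patch): flipping this knob from a C program, assuming the standard /proc/sys view of kernel.* sysctls. Root is required, and the setting affects coredumps system-wide.

/* Enable size-ordered VMA dumping (kernel.core_sort_vma = 1). */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/core_sort_vma", "w");

	if (!f) {
		perror("core_sort_vma");
		return EXIT_FAILURE;
	}
	fputs("1", f);	/* writing 0 restores the default address order */
	fclose(f);
	return EXIT_SUCCESS;
}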

Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml

Lines changed: 1 addition & 0 deletions
@@ -146,6 +146,7 @@ properties:
       maxItems: 2
 
   pwm-names:
+    minItems: 1
     items:
       - const: convst1
       - const: convst2

Makefile

Lines changed: 6 additions & 1 deletion
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 13
-SUBLEVEL = 5
+SUBLEVEL = 7
 EXTRAVERSION =
 NAME = Baby Opossum Posse
 
@@ -1121,6 +1121,11 @@ endif
 KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
 KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
 
+# userspace programs are linked via the compiler, use the correct linker
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy)
+KBUILD_USERLDFLAGS += --ld-path=$(LD)
+endif
+
 # make the checker run with the right architecture
 CHECKFLAGS += --arch=$(ARCH)

arch/arm/mm/fault-armv.c

Lines changed: 25 additions & 12 deletions
@@ -62,7 +62,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 }
 
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
-		      unsigned long pfn, struct vm_fault *vmf)
+		      unsigned long pfn, bool need_lock)
 {
 	spinlock_t *ptl;
 	pgd_t *pgd;
@@ -99,12 +99,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	if (!pte)
 		return 0;
 
-	/*
-	 * If we are using split PTE locks, then we need to take the page
-	 * lock here. Otherwise we are using shared mm->page_table_lock
-	 * which is already locked, thus cannot take it.
-	 */
-	if (ptl != vmf->ptl) {
+	if (need_lock) {
+		/*
+		 * Use nested version here to indicate that we are already
+		 * holding one similar spinlock.
+		 */
 		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
 		if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
 			pte_unmap_unlock(pte, ptl);
@@ -114,7 +113,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 
 	ret = do_adjust_pte(vma, address, pfn, pte);
 
-	if (ptl != vmf->ptl)
+	if (need_lock)
 		spin_unlock(ptl);
 	pte_unmap(pte);
 
@@ -123,9 +122,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 
 static void
 make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
-	      unsigned long addr, pte_t *ptep, unsigned long pfn,
-	      struct vm_fault *vmf)
+	      unsigned long addr, pte_t *ptep, unsigned long pfn)
 {
+	const unsigned long pmd_start_addr = ALIGN_DOWN(addr, PMD_SIZE);
+	const unsigned long pmd_end_addr = pmd_start_addr + PMD_SIZE;
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *mpnt;
 	unsigned long offset;
@@ -141,6 +141,14 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
 	 */
 	flush_dcache_mmap_lock(mapping);
 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
+		/*
+		 * If we are using split PTE locks, then we need to take the pte
+		 * lock. Otherwise we are using shared mm->page_table_lock which
+		 * is already locked, thus cannot take it.
+		 */
+		bool need_lock = IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS);
+		unsigned long mpnt_addr;
+
 		/*
 		 * If this VMA is not in our MM, we can ignore it.
 		 * Note that we intentionally mask out the VMA
@@ -151,7 +159,12 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
 		if (!(mpnt->vm_flags & VM_MAYSHARE))
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn, vmf);
+		mpnt_addr = mpnt->vm_start + offset;
+
+		/* Avoid deadlocks by not grabbing the same PTE lock again. */
+		if (mpnt_addr >= pmd_start_addr && mpnt_addr < pmd_end_addr)
+			need_lock = false;
+		aliases += adjust_pte(mpnt, mpnt_addr, pfn, need_lock);
 	}
 	flush_dcache_mmap_unlock(mapping);
 	if (aliases)
@@ -194,7 +207,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 		__flush_dcache_folio(mapping, folio);
 	if (mapping) {
 		if (cache_is_vivt())
-			make_coherent(mapping, vma, addr, ptep, pfn, vmf);
+			make_coherent(mapping, vma, addr, ptep, pfn);
 		else if (vma->vm_flags & VM_EXEC)
 			__flush_icache_all();
 	}
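To make the deadlock-avoidance check above concrete, here is a standalone sketch (userspace C, assuming 2 MiB PMDs as with 4 KiB pages) of the same-PMD window test: the fault path already holds the PTE lock for any alias landing in the same PMD-sized region, so adjust_pte() must not take it again.

#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE	(2UL << 20)	/* assumption: 2 MiB PMD */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

/* True when alias_addr falls in the PMD-sized window whose PTE lock
 * the fault path already holds for fault_addr. */
static bool same_pmd_window(unsigned long fault_addr, unsigned long alias_addr)
{
	unsigned long start = ALIGN_DOWN(fault_addr, PMD_SIZE);

	return alias_addr >= start && alias_addr < start + PMD_SIZE;
}

int main(void)
{
	/* Alias 4 KiB above the fault: same PMD, skip the nested lock. */
	printf("%d\n", same_pmd_window(0x400000UL, 0x401000UL));	/* 1 */
	/* Alias in another 2 MiB window: take the nested PTE lock. */
	printf("%d\n", same_pmd_window(0x400000UL, 0x700000UL));	/* 0 */
	return 0;
}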

arch/arm64/include/asm/hugetlb.h

Lines changed: 2 additions & 2 deletions
@@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 				      unsigned long addr, pte_t *ptep,
 				      pte_t pte, int dirty);
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
-extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-				     unsigned long addr, pte_t *ptep);
+extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				     pte_t *ptep, unsigned long sz);
 #define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
 extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
 				    unsigned long addr, pte_t *ptep);

arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
@@ -1262,7 +1262,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 extern unsigned int __ro_after_init kvm_arm_vmid_bits;
 int __init kvm_arm_vmid_alloc_init(void);
 void __init kvm_arm_vmid_alloc_free(void);
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
 void kvm_arm_vmid_clear_active(void);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)

arch/arm64/kvm/arm.c

Lines changed: 10 additions & 12 deletions
@@ -580,6 +580,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	mmu = vcpu->arch.hw_mmu;
 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
 
+	/*
+	 * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
+	 * which happens eagerly in VHE.
+	 *
+	 * Also, the VMID allocator only preserves VMIDs that are active at the
+	 * time of rollover, so KVM might need to grab a new VMID for the MMU if
+	 * this is called from kvm_sched_in().
+	 */
+	kvm_arm_vmid_update(&mmu->vmid);
+
 	/*
 	 * We guarantee that both TLBs and I-cache are private to each
 	 * vcpu. If detecting that a vcpu from the same VM has
@@ -1147,18 +1157,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	 */
 	preempt_disable();
 
-	/*
-	 * The VMID allocator only tracks active VMIDs per
-	 * physical CPU, and therefore the VMID allocated may not be
-	 * preserved on VMID roll-over if the task was preempted,
-	 * making a thread's VMID inactive. So we need to call
-	 * kvm_arm_vmid_update() in non-premptible context.
-	 */
-	if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
-	    has_vhe())
-		__load_stage2(vcpu->arch.hw_mmu,
-			      vcpu->arch.hw_mmu->arch);
-
 	kvm_pmu_flush_hwstate(vcpu);
 
 	local_irq_disable();

arch/arm64/kvm/vmid.c

Lines changed: 3 additions & 8 deletions
@@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
 	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
 }
 
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 {
 	unsigned long flags;
 	u64 vmid, old_active_vmid;
-	bool updated = false;
 
 	vmid = atomic64_read(&kvm_vmid->id);
 
@@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
 	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
 					  old_active_vmid, vmid))
-		return false;
+		return;
 
 	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
 
 	/* Check that our VMID belongs to the current generation. */
 	vmid = atomic64_read(&kvm_vmid->id);
-	if (!vmid_gen_match(vmid)) {
+	if (!vmid_gen_match(vmid))
 		vmid = new_vmid(kvm_vmid);
-		updated = true;
-	}
 
 	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
 	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
-
-	return updated;
 }
 
 /*
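The shape of kvm_arm_vmid_update() — a relaxed cmpxchg fast path, with a spinlocked slow path only around rollover — is a common pattern. A much-simplified userspace analogue follows (C11 atomics plus a pthread mutex; the names, the generation encoding in the top bits, and the dummy allocator are all illustrative, not the kernel's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t active_id;	/* stands in for this CPU's active_vmids slot */
static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t current_gen = 1;	/* bumped on rollover (not shown) */

static int gen_match(uint64_t id)
{
	return (id >> 48) == current_gen;	/* toy generation encoding */
}

static uint64_t new_id(void)
{
	return (current_gen << 48) | 42;	/* dummy allocation */
}

void id_update(uint64_t *idp)
{
	uint64_t id = *idp;
	uint64_t old = atomic_load(&active_id);

	/* Fast path: the ID is still current and this CPU's slot can be
	 * re-claimed with a single compare-and-swap -- no lock taken. */
	if (old != 0 && gen_match(id) &&
	    atomic_compare_exchange_strong(&active_id, &old, id))
		return;

	/* Slow path: serialize against rollover, allocate if stale. */
	pthread_mutex_lock(&id_lock);
	if (!gen_match(*idp))
		*idp = new_id();
	atomic_store(&active_id, *idp);
	pthread_mutex_unlock(&id_lock);
}

int main(void)
{
	uint64_t vm_id = new_id();

	id_update(&vm_id);	/* first call takes the slow path: slot empty */
	return 0;
}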

arch/arm64/mm/hugetlbpage.c

Lines changed: 25 additions & 36 deletions
@@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
 
 static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
 {
-	int contig_ptes = 0;
+	int contig_ptes = 1;
 
 	*pgsize = size;
 
 	switch (size) {
-#ifndef __PAGETABLE_PMD_FOLDED
-	case PUD_SIZE:
-		if (pud_sect_supported())
-			contig_ptes = 1;
-		break;
-#endif
-	case PMD_SIZE:
-		contig_ptes = 1;
-		break;
 	case CONT_PMD_SIZE:
 		*pgsize = PMD_SIZE;
 		contig_ptes = CONT_PMDS;
@@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
 		*pgsize = PAGE_SIZE;
 		contig_ptes = CONT_PTES;
 		break;
+	default:
+		WARN_ON(!__hugetlb_valid_size(size));
 	}
 
 	return contig_ptes;
@@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
 			      unsigned long pgsize,
 			      unsigned long ncontig)
 {
-	pte_t orig_pte = __ptep_get(ptep);
-	unsigned long i;
-
-	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
-		pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
-
-		/*
-		 * If HW_AFDBM is enabled, then the HW could turn on
-		 * the dirty or accessed bit for any page in the set,
-		 * so check them all.
-		 */
-		if (pte_dirty(pte))
-			orig_pte = pte_mkdirty(orig_pte);
-
-		if (pte_young(pte))
-			orig_pte = pte_mkyoung(orig_pte);
+	pte_t pte, tmp_pte;
+	bool present;
+
+	pte = __ptep_get_and_clear(mm, addr, ptep);
+	present = pte_present(pte);
+	while (--ncontig) {
+		ptep++;
+		addr += pgsize;
+		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
+		if (present) {
+			if (pte_dirty(tmp_pte))
+				pte = pte_mkdirty(pte);
+			if (pte_young(tmp_pte))
+				pte = pte_mkyoung(pte);
+		}
 	}
-	return orig_pte;
+	return pte;
 }
 
 static pte_t get_clear_contig_flush(struct mm_struct *mm,
@@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 		__pte_clear(mm, addr, ptep);
 }
 
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-			      unsigned long addr, pte_t *ptep)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, unsigned long sz)
 {
 	int ncontig;
 	size_t pgsize;
-	pte_t orig_pte = __ptep_get(ptep);
-
-	if (!pte_cont(orig_pte))
-		return __ptep_get_and_clear(mm, addr, ptep);
-
-	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
 
+	ncontig = num_contig_ptes(sz, &pgsize);
 	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
 }
 
@@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
 
 pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
+	unsigned long psize = huge_page_size(hstate_vma(vma));
+
 	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
 		/*
 		 * Break-before-make (BBM) is required for all user space mappings
@@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
 		if (pte_user_exec(__ptep_get(ptep)))
 			return huge_ptep_clear_flush(vma, addr, ptep);
 	}
-	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
 }
 
 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
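For reference, the size-to-layout contract that num_contig_ptes() now centralizes (callers pass the huge page size explicitly, since a cleared or non-present entry can no longer be probed for the contiguous bit) can be sketched standalone. This assumes a 4 KiB granule with 16-entry contiguous hints; the values differ on other granules:

#include <stdio.h>

#define PAGE_SIZE	(4UL << 10)
#define PMD_SIZE	(2UL << 20)
#define CONT_PTE_SIZE	(16 * PAGE_SIZE)	/* 64 KiB */
#define CONT_PMD_SIZE	(16 * PMD_SIZE)		/* 32 MiB */

/* Illustrative analogue of num_contig_ptes(): map a hugetlb size onto
 * (entry size, number of contiguous entries). */
static int num_contig_entries(unsigned long size, unsigned long *pgsize)
{
	int contig = 1;

	*pgsize = size;
	switch (size) {
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig = 16;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig = 16;
		break;
	/* PMD/PUD sizes now take the default: one entry of "size". */
	}
	return contig;
}

int main(void)
{
	unsigned long pgsize;
	int n;

	n = num_contig_entries(CONT_PTE_SIZE, &pgsize);
	printf("64K -> %2d x %lu\n", n, pgsize);	/* 16 x 4096 */
	n = num_contig_entries(PMD_SIZE, &pgsize);
	printf("2M  -> %2d x %lu\n", n, pgsize);	/* 1 x 2097152 */
	return 0;
}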

arch/arm64/mm/init.c

Lines changed: 1 addition & 6 deletions
@@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
 		extern u16 memstart_offset_seed;
-
-		/*
-		 * Use the sanitised version of id_aa64mmfr0_el1 so that linear
-		 * map randomization can be enabled by shrinking the IPA space.
-		 */
-		u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
 		int parange = cpuid_feature_extract_unsigned_field(
 			mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
 		s64 range = linear_region_size -
