
Commit f45dc31

Merge remote-tracking branch 'stable/linux-4.4.y' into rpi-4.4.y
2 parents: 1325705 + 351d2d4

Only a subset of the 97 changed files is shown below.

97 files changed (+1167, -477 lines)

Documentation/serial/tty.txt

Lines changed: 0 additions & 3 deletions

@@ -213,9 +213,6 @@ TTY_IO_ERROR		If set, causes all subsequent userspace read/write
 
 TTY_OTHER_CLOSED	Device is a pty and the other side has closed.
 
-TTY_OTHER_DONE		Device is a pty and the other side has closed and
-			all pending input processing has been completed.
-
 TTY_NO_WRITE_SPLIT	Prevent driver from splitting up writes into
 			smaller chunks.

Makefile

Lines changed: 4 additions & 3 deletions

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 11
+SUBLEVEL = 12
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -682,9 +682,10 @@ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
 KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
 else
 
-# This warning generated too much noise in a regular build.
-# Use make W=1 to enable this warning (see scripts/Makefile.build)
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.build)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
 endif
 
 ifdef CONFIG_FRAME_POINTER
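For context, -Wunused-const-variable fires on any file-scope const object that is defined but never referenced, which is very common in kernel headers, hence disabling it for normal builds. A minimal standalone file that would trigger the warning (illustrative only, not part of the tree):

/* unused_const.c - illustrative only; compile with
 * `gcc -Wunused-const-variable -c unused_const.c` (gcc that supports the flag)
 * to see the warning the Makefile change silences for regular builds. */
static const int debug_level = 3;	/* defined but never referenced */

int main(void)
{
	return 0;
}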

arch/arm/kvm/mmu.c

Lines changed: 11 additions & 6 deletions

@@ -886,11 +886,14 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
 
 	old_pmd = *pmd;
-	kvm_set_pmd(pmd, *new_pmd);
-	if (pmd_present(old_pmd))
+	if (pmd_present(old_pmd)) {
+		pmd_clear(pmd);
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
-	else
+	} else {
 		get_page(virt_to_page(pmd));
+	}
+
+	kvm_set_pmd(pmd, *new_pmd);
 	return 0;
 }
 
@@ -939,12 +942,14 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 
 	/* Create 2nd stage page table mapping - Level 3 */
 	old_pte = *pte;
-	kvm_set_pte(pte, *new_pte);
-	if (pte_present(old_pte))
+	if (pte_present(old_pte)) {
+		kvm_set_pte(pte, __pte(0));
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
-	else
+	} else {
 		get_page(virt_to_page(pte));
+	}
 
+	kvm_set_pte(pte, *new_pte);
 	return 0;
 }
 
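Both hunks enforce the same break-before-make ordering: when a live stage-2 entry is being replaced, the old entry is first removed and the TLB invalidated for that address, and only then is the new entry written, so a stale translation and the new one are never visible at the same time. A standalone sketch of that ordering (stub helpers, reference counting omitted; not the kernel implementation):

/* break_before_make.c - standalone sketch of the ordering the patch enforces;
 * pmd_present(), pmd_clear(), kvm_set_pmd() and the TLB flush are stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pmd_t;

static pmd_t table_entry;	/* pretend stage-2 PMD slot */

static bool pmd_present(pmd_t pmd)		{ return pmd & 1; }
static void pmd_clear(pmd_t *pmd)		{ *pmd = 0; }
static void kvm_set_pmd(pmd_t *pmd, pmd_t new)	{ *pmd = new; }
static void kvm_tlb_flush_vmid_ipa(void)	{ puts("TLB invalidate"); }

/* Install a new huge mapping: break the old entry and flush before the new
 * entry becomes visible, so stale and fresh translations never coexist. */
static void set_pmd_huge(pmd_t *pmd, pmd_t new_pmd)
{
	pmd_t old_pmd = *pmd;

	if (pmd_present(old_pmd)) {
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa();
	}
	kvm_set_pmd(pmd, new_pmd);
}

int main(void)
{
	set_pmd_huge(&table_entry, 0x80000001ULL);	/* fresh mapping: no flush */
	set_pmd_huge(&table_entry, 0x90000001ULL);	/* replace: break, flush, make */
	printf("final entry: %#llx\n", (unsigned long long)table_entry);
	return 0;
}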

arch/arm64/include/asm/pgtable-hwdef.h

Lines changed: 0 additions & 1 deletion

@@ -117,7 +117,6 @@
  * Section
  */
 #define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
-#define PMD_SECT_PROT_NONE	(_AT(pmdval_t, 1) << 58)
 #define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
 #define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
 #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)

arch/arm64/include/asm/pgtable.h

Lines changed: 20 additions & 5 deletions

@@ -347,6 +347,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
 #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
@@ -355,7 +356,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
+#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
@@ -394,7 +395,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
-#define pmd_present(pmd)	(pmd_val(pmd))
 
 #define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))
 
@@ -538,6 +538,21 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 }
 
 #ifdef CONFIG_ARM64_HW_AFDBM
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pte_t *ptep,
+				 pte_t entry, int dirty);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+					unsigned long address, pmd_t *pmdp,
+					pmd_t entry, int dirty)
+{
+	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
+}
+#endif
+
 /*
  * Atomic pte/pmd modifications.
  */
@@ -590,9 +605,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-				       unsigned long address, pmd_t *pmdp)
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+					    unsigned long address, pmd_t *pmdp)
 {
 	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
 }
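The pmd_mknotpresent() change is easiest to see at the bit level: the old definition cleared the whole two-bit descriptor type field, while the new one clears only the hardware valid bit, leaving the rest of the entry intact for the now pte_present()-based pmd_present() to interpret. A standalone sketch of that difference, with the masks hard-coded to mirror pgtable-hwdef.h (this is only an illustration of the bit arithmetic, not kernel code):

/* pmd_mknotpresent_demo.c - bit-level illustration of the macro change. */
#include <stdint.h>
#include <stdio.h>

#define PMD_TYPE_MASK	(3ULL << 0)	/* bits [1:0]: descriptor type field */
#define PMD_SECT_VALID	(1ULL << 0)	/* bit 0: hardware valid bit */

int main(void)
{
	/* made-up section descriptor: valid + type bits, output address, attrs */
	uint64_t pmd = 0x0040000000000f05ULL;

	uint64_t old_macro = pmd & ~PMD_TYPE_MASK;	/* pre-patch: wipes bits 0 and 1 */
	uint64_t new_macro = pmd & ~PMD_SECT_VALID;	/* post-patch: wipes only bit 0 */

	printf("original        : %#018llx\n", (unsigned long long)pmd);
	printf("old notpresent  : %#018llx\n", (unsigned long long)old_macro);
	printf("new notpresent  : %#018llx\n", (unsigned long long)new_macro);
	return 0;
}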

arch/arm64/kernel/cpuinfo.c

Lines changed: 2 additions & 1 deletion

@@ -85,7 +85,8 @@ static const char *const compat_hwcap_str[] = {
 	"idivt",
 	"vfpd32",
 	"lpae",
-	"evtstrm"
+	"evtstrm",
+	NULL
 };
 
 static const char *const compat_hwcap2_str[] = {
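The added NULL gives code that walks compat_hwcap_str a sentinel to stop on instead of running past the end of the array. A standalone sketch of that consumer pattern (the loop below is illustrative, not the kernel's own reader):

/* hwcap_print.c - walk a NULL-terminated string table until the sentinel. */
#include <stdio.h>

static const char *const compat_hwcap_str[] = {
	"swp", "half", "thumb", "idivt", "vfpd32", "lpae", "evtstrm",
	NULL				/* sentinel the reader stops on */
};

int main(void)
{
	for (int i = 0; compat_hwcap_str[i]; i++)	/* stops at NULL, not ARRAY_SIZE */
		printf("%s ", compat_hwcap_str[i]);
	putchar('\n');
	return 0;
}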

arch/arm64/kvm/inject_fault.c

Lines changed: 1 addition & 1 deletion

@@ -130,7 +130,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
 
 	if (!is_iabt)
-		esr |= ESR_ELx_EC_DABT_LOW;
+		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
 
 	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
 }
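The exception class (EC) field lives in the top bits of ESR_EL1, so the class value has to be shifted by ESR_ELx_EC_SHIFT before being OR'ed in; without the shift it lands in the low syndrome bits instead of the class field. A standalone illustration, with the constants hard-coded to mirror the ESR_ELx definitions (values believed correct, but treat them as part of the sketch):

/* esr_fields.c - why the class value must be shifted into place. */
#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_DABT_LOW	0x24	/* exception class: data abort, lower EL */
#define ESR_ELx_FSC_EXTABT	0x10	/* fault status: synchronous external abort */

int main(void)
{
	uint32_t esr_wrong = ESR_ELx_EC_DABT_LOW | ESR_ELx_FSC_EXTABT;	/* class bits land in the syndrome field */
	uint32_t esr_right = (ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT) | ESR_ELx_FSC_EXTABT;

	printf("without shift: 0x%08x (EC field = 0x%02x)\n", esr_wrong, esr_wrong >> ESR_ELx_EC_SHIFT);
	printf("with shift   : 0x%08x (EC field = 0x%02x)\n", esr_right, esr_right >> ESR_ELx_EC_SHIFT);
	return 0;
}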

arch/arm64/mm/fault.c

Lines changed: 50 additions & 0 deletions

@@ -81,6 +81,56 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 	printk("\n");
 }
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+/*
+ * This function sets the access flags (dirty, accessed), as well as write
+ * permission, and only to a more permissive setting.
+ *
+ * It needs to cope with hardware update of the accessed/dirty state by other
+ * agents in the system and can safely skip the __sync_icache_dcache() call as,
+ * like set_pte_at(), the PTE is never changed from no-exec to exec here.
+ *
+ * Returns whether or not the PTE actually changed.
+ */
+int ptep_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pte_t *ptep,
+			  pte_t entry, int dirty)
+{
+	pteval_t old_pteval;
+	unsigned int tmp;
+
+	if (pte_same(*ptep, entry))
+		return 0;
+
+	/* only preserve the access flags and write permission */
+	pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
+
+	/*
+	 * PTE_RDONLY is cleared by default in the asm below, so set it in
+	 * back if necessary (read-only or clean PTE).
+	 */
+	if (!pte_write(entry) || !dirty)
+		pte_val(entry) |= PTE_RDONLY;
+
+	/*
+	 * Setting the flags must be done atomically to avoid racing with the
+	 * hardware update of the access/dirty state.
+	 */
+	asm volatile("//	ptep_set_access_flags\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	and	%0, %0, %3		// clear PTE_RDONLY\n"
+	"	orr	%0, %0, %4		// set flags\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
+	: "L" (~PTE_RDONLY), "r" (pte_val(entry)));
+
+	flush_tlb_fix_spurious_fault(vma, address);
+	return 1;
+}
+#endif
+
 /*
  * The kernel tried to access some page that wasn't present.
 */
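The ldxr/stxr loop is a lock-free read-modify-write: reload the PTE, clear PTE_RDONLY, OR in the already-sanitised flags, and retry if another agent (such as the hardware access/dirty updater) changed the entry in between. A rough userspace analogue of the same retry pattern using a compare-and-swap loop (bit positions mirror the arm64 PTE_AF/PTE_RDONLY values, but this is only a sketch, not the kernel code):

/* set_access_flags_demo.c - CAS-loop analogue of the exclusive-access loop. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_AF		(1ULL << 10)
#define PTE_RDONLY	(1ULL << 7)

static _Atomic uint64_t pte = PTE_RDONLY;	/* clean, read-only entry */

static void set_access_flags(uint64_t entry)
{
	uint64_t old = atomic_load(&pte);
	uint64_t new;

	do {
		new = (old & ~PTE_RDONLY) | entry;	/* clear RDONLY, OR in new flags */
	} while (!atomic_compare_exchange_weak(&pte, &old, new));	/* retry on interference */
}

int main(void)
{
	set_access_flags(PTE_AF | PTE_RDONLY);	/* mark accessed, keep read-only */
	printf("pte = %#llx\n", (unsigned long long)atomic_load(&pte));
	return 0;
}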

arch/mips/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion

@@ -784,7 +784,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 
 uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
 void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
 void kvm_mips_init_count(struct kvm_vcpu *vcpu);
 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);

arch/mips/kvm/emulate.c

Lines changed: 50 additions & 39 deletions

@@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
  */
 static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
 {
-	ktime_t expires;
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	ktime_t expires, threshold;
+	uint32_t count, compare;
 	int running;
 
-	/* Is the hrtimer pending? */
+	/* Calculate the biased and scaled guest CP0_Count */
+	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+	compare = kvm_read_c0_guest_compare(cop0);
+
+	/*
+	 * Find whether CP0_Count has reached the closest timer interrupt. If
+	 * not, we shouldn't inject it.
+	 */
+	if ((int32_t)(count - compare) < 0)
+		return count;
+
+	/*
+	 * The CP0_Count we're going to return has already reached the closest
+	 * timer interrupt. Quickly check if it really is a new interrupt by
+	 * looking at whether the interval until the hrtimer expiry time is
+	 * less than 1/4 of the timer period.
+	 */
 	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
-	if (ktime_compare(now, expires) >= 0) {
+	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
+	if (ktime_before(expires, threshold)) {
 		/*
 		 * Cancel it while we handle it so there's no chance of
 		 * interference with the timeout handler.
@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
 		}
 	}
 
-	/* Return the biased and scaled guest CP0_Count */
-	return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+	return count;
 }
 
 /**
@@ -419,32 +437,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
 	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
 }
 
-/**
- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
- * @vcpu:	Virtual CPU.
- *
- * Recalculates and updates the expiry time of the hrtimer. This can be used
- * after timer parameters have been altered which do not depend on the time that
- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
- * kvm_mips_resume_hrtimer() are used directly).
- *
- * It is guaranteed that no timer interrupts will be lost in the process.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
-{
-	ktime_t now;
-	uint32_t count;
-
-	/*
-	 * freeze_hrtimer takes care of a timer interrupts <= count, and
-	 * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
-	 */
-	now = kvm_mips_freeze_hrtimer(vcpu, &count);
-	kvm_mips_resume_hrtimer(vcpu, now, count);
-}
-
 /**
  * kvm_mips_write_count() - Modify the count and update timer.
  * @vcpu:	Virtual CPU.
@@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
+ * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
+ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
+ * any pending timer interrupt is preserved.
 */
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int dc;
+	u32 old_compare = kvm_read_c0_guest_compare(cop0);
+	ktime_t now;
+	uint32_t count;
 
 	/* if unchanged, must just be an ack */
-	if (kvm_read_c0_guest_compare(cop0) == compare)
+	if (old_compare == compare) {
+		if (!ack)
+			return;
+		kvm_mips_callbacks->dequeue_timer_int(vcpu);
+		kvm_write_c0_guest_compare(cop0, compare);
 		return;
+	}
+
+	/* freeze_hrtimer() takes care of timer interrupts <= count */
+	dc = kvm_mips_count_disabled(vcpu);
+	if (!dc)
+		now = kvm_mips_freeze_hrtimer(vcpu, &count);
+
+	if (ack)
+		kvm_mips_callbacks->dequeue_timer_int(vcpu);
 
-	/* Update compare */
 	kvm_write_c0_guest_compare(cop0, compare);
 
-	/* Update timeout if count enabled */
-	if (!kvm_mips_count_disabled(vcpu))
-		kvm_mips_update_hrtimer(vcpu);
+	/* resume_hrtimer() takes care of timer interrupts > count */
+	if (!dc)
+		kvm_mips_resume_hrtimer(vcpu, now, count);
 }
 
 /**
@@ -1095,9 +1106,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 
 				/* If we are writing to COMPARE */
 				/* Clear pending timer interrupt, if any */
-				kvm_mips_callbacks->dequeue_timer_int(vcpu);
 				kvm_mips_write_compare(vcpu,
-						       vcpu->arch.gprs[rt]);
+						       vcpu->arch.gprs[rt],
+						       true);
 			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
 				unsigned int old_val, val, change;
 
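The `(int32_t)(count - compare) < 0` test added above is the usual wraparound-safe way to ask whether a free-running 32-bit counter has reached its compare value (the same idiom as the kernel's time_before()). A standalone demonstration of why the signed difference works even after CP0_Count wraps:

/* count_compare.c - wraparound-safe "has the counter reached compare" check. */
#include <stdint.h>
#include <stdio.h>

static int count_reached(uint32_t count, uint32_t compare)
{
	return (int32_t)(count - compare) >= 0;	/* true once count passes compare */
}

int main(void)
{
	printf("%d\n", count_reached(0x00000100, 0x00000200));	/* 0: not yet */
	printf("%d\n", count_reached(0x00000300, 0x00000200));	/* 1: reached */
	printf("%d\n", count_reached(0x00000010, 0xfffffff0));	/* 1: reached after wrap */
	return 0;
}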

arch/mips/kvm/trap_emul.c

Lines changed: 1 addition & 1 deletion

@@ -547,7 +547,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 		kvm_mips_write_count(vcpu, v);
 		break;
 	case KVM_REG_MIPS_CP0_COMPARE:
-		kvm_mips_write_compare(vcpu, v);
+		kvm_mips_write_compare(vcpu, v, false);
 		break;
 	case KVM_REG_MIPS_CP0_CAUSE:
 		/*

arch/x86/kernel/cpu/perf_event_intel_pt.c

Lines changed: 2 additions & 0 deletions

@@ -694,6 +694,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
 
 	/* clear STOP and INT from current entry */
 	buf->topa_index[buf->stop_pos]->stop = 0;
+	buf->topa_index[buf->stop_pos]->intr = 0;
 	buf->topa_index[buf->intr_pos]->intr = 0;
 
 	/* how many pages till the STOP marker */
@@ -718,6 +719,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
 	buf->intr_pos = idx;
 
 	buf->topa_index[buf->stop_pos]->stop = 1;
+	buf->topa_index[buf->stop_pos]->intr = 1;
 	buf->topa_index[buf->intr_pos]->intr = 1;
 
 	return 0;

arch/x86/kvm/cpuid.c

Lines changed: 1 addition & 0 deletions

@@ -509,6 +509,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			do_cpuid_1_ent(&entry[i], function, idx);
 			if (idx == 1) {
 				entry[i].eax &= kvm_supported_word10_x86_features;
+				cpuid_mask(&entry[i].eax, 10);
 				entry[i].ebx = 0;
 				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
 					entry[i].ebx =
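cpuid_mask() additionally ANDs the feature word with what the host CPU itself advertises, so the XSAVE sub-leaf features KVM is willing to expose are further limited to those the hardware actually supports. A toy illustration of that masking step (the values are invented; only the AND-twice pattern mirrors the code above):

/* feature_mask_demo.c - limit an exposed feature word to host capabilities. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t kvm_supported = 0x0000000f;	/* features KVM can virtualize (invented) */
	uint32_t host_reports  = 0x00000005;	/* features the host CPU reports (invented) */

	uint32_t guest_eax = kvm_supported;	/* entry[i].eax &= kvm_supported_... */
	guest_eax &= host_reports;		/* analogue of cpuid_mask(&entry[i].eax, 10) */

	printf("guest sees 0x%08x\n", guest_eax);
	return 0;
}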
