Commit fd31a1b

Merge tag 'for-linus-6.14-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen fixes from Juergen Gross:
 "Three fixes to xen-swiotlb driver:

   - two fixes for issues coming up due to another fix in 6.12

   - addition of an __init annotation"

* tag 'for-linus-6.14-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  Xen/swiotlb: mark xen_swiotlb_fixup() __init
  x86/xen: allow larger contiguous memory regions in PV guests
  xen/swiotlb: relax alignment requirements

2 parents: 128c8f9 + 75ad023

2 files changed: +75 −18 lines

arch/x86/xen/mmu_pv.c

Lines changed: 62 additions & 9 deletions
@@ -111,6 +111,51 @@ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
  */
 static DEFINE_SPINLOCK(xen_reservation_lock);
 
+/* Protected by xen_reservation_lock. */
+#define MIN_CONTIG_ORDER 9 /* 2MB */
+static unsigned int discontig_frames_order = MIN_CONTIG_ORDER;
+static unsigned long discontig_frames_early[1UL << MIN_CONTIG_ORDER] __initdata;
+static unsigned long *discontig_frames __refdata = discontig_frames_early;
+static bool discontig_frames_dyn;
+
+static int alloc_discontig_frames(unsigned int order)
+{
+	unsigned long *new_array, *old_array;
+	unsigned int old_order;
+	unsigned long flags;
+
+	BUG_ON(order < MIN_CONTIG_ORDER);
+	BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE);
+
+	new_array = (unsigned long *)__get_free_pages(GFP_KERNEL,
+						      order - MIN_CONTIG_ORDER);
+	if (!new_array)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&xen_reservation_lock, flags);
+
+	old_order = discontig_frames_order;
+
+	if (order > discontig_frames_order || !discontig_frames_dyn) {
+		if (!discontig_frames_dyn)
+			old_array = NULL;
+		else
+			old_array = discontig_frames;
+
+		discontig_frames = new_array;
+		discontig_frames_order = order;
+		discontig_frames_dyn = true;
+	} else {
+		old_array = new_array;
+	}
+
+	spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+	free_pages((unsigned long)old_array, old_order - MIN_CONTIG_ORDER);
+
+	return 0;
+}
+
 /*
  * Note about cr3 (pagetable base) values:
  *
@@ -814,6 +859,9 @@ static void __init xen_after_bootmem(void)
 	SetPagePinned(virt_to_page(level3_user_vsyscall));
 #endif
 	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
+
+	if (alloc_discontig_frames(MIN_CONTIG_ORDER))
+		BUG();
 }
 
 static void xen_unpin_page(struct mm_struct *mm, struct page *page,
@@ -2203,10 +2251,6 @@ void __init xen_init_mmu_ops(void)
 	memset(dummy_mapping, 0xff, PAGE_SIZE);
 }
 
-/* Protected by xen_reservation_lock. */
-#define MAX_CONTIG_ORDER 9 /* 2MB */
-static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
-
 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
 			      unsigned long *in_frames,
@@ -2323,18 +2367,25 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 				 unsigned int address_bits,
 				 dma_addr_t *dma_handle)
 {
-	unsigned long *in_frames = discontig_frames, out_frame;
+	unsigned long *in_frames, out_frame;
 	unsigned long flags;
 	int success;
 	unsigned long vstart = (unsigned long)phys_to_virt(pstart);
 
-	if (unlikely(order > MAX_CONTIG_ORDER))
-		return -ENOMEM;
+	if (unlikely(order > discontig_frames_order)) {
+		if (!discontig_frames_dyn)
+			return -ENOMEM;
+
+		if (alloc_discontig_frames(order))
+			return -ENOMEM;
+	}
 
 	memset((void *) vstart, 0, PAGE_SIZE << order);
 
 	spin_lock_irqsave(&xen_reservation_lock, flags);
 
+	in_frames = discontig_frames;
+
 	/* 1. Zap current PTEs, remembering MFNs. */
 	xen_zap_pfn_range(vstart, order, in_frames, NULL);
 
@@ -2358,19 +2409,21 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 {
-	unsigned long *out_frames = discontig_frames, in_frame;
+	unsigned long *out_frames, in_frame;
 	unsigned long flags;
 	int success;
 	unsigned long vstart;
 
-	if (unlikely(order > MAX_CONTIG_ORDER))
+	if (unlikely(order > discontig_frames_order))
 		return;
 
 	vstart = (unsigned long)phys_to_virt(pstart);
 	memset((void *) vstart, 0, PAGE_SIZE << order);
 
 	spin_lock_irqsave(&xen_reservation_lock, flags);
 
+	out_frames = discontig_frames;
+
 	/* 1. Find start MFN of contiguous extent. */
 	in_frame = virt_to_mfn((void *)vstart);
 
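A note on the mmu_pv.c change above: the old code capped contiguous regions at a fixed MAX_CONTIG_ORDER because discontig_frames was a static array. The new alloc_discontig_frames() keeps a small __initdata array for early boot and grows the backing store on demand: the candidate buffer is allocated before taking the spinlock, swapped in under xen_reservation_lock only if it is larger than the current one (or if the current one is still the static early array), and whichever buffer is superseded or turned out to be redundant is freed after the lock is dropped. The sketch below is a hypothetical user-space analogue of that pattern, not kernel code; the pthread mutex, malloc()/free() and the grow_frames() name are stand-ins for xen_reservation_lock, __get_free_pages()/free_pages() and alloc_discontig_frames().

/* Hypothetical user-space analogue of the grow-on-demand pattern used
 * by alloc_discontig_frames().  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#define MIN_ORDER 9

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long frames_early[1UL << MIN_ORDER];  /* "boot-time" buffer */
static unsigned long *frames = frames_early;
static unsigned int frames_order = MIN_ORDER;
static bool frames_dyn;                               /* false until first grow */

static int grow_frames(unsigned int order)
{
	unsigned long *new_buf, *old_buf;

	/* Allocate the candidate buffer before taking the lock. */
	new_buf = malloc((1UL << order) * sizeof(unsigned long));
	if (!new_buf)
		return -1;

	pthread_mutex_lock(&lock);
	if (order > frames_order || !frames_dyn) {
		/* The first dynamic buffer must not free the static one. */
		old_buf = frames_dyn ? frames : NULL;
		frames = new_buf;
		frames_order = order;
		frames_dyn = true;
	} else {
		/* Someone else already grew it enough; drop our copy. */
		old_buf = new_buf;
	}
	pthread_mutex_unlock(&lock);

	free(old_buf);  /* free(NULL) is a no-op */
	return 0;
}

int main(void)
{
	return grow_frames(MIN_ORDER + 1);  /* grow past the boot-time size */
}

The point mirrored here is the lock ordering: no allocation or free ever happens while the lock is held, and the decision of which buffer to discard is made entirely inside the critical section.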
drivers/xen/swiotlb-xen.c

Lines changed: 13 additions & 9 deletions
@@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
 	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
 }
 
+static inline bool range_requires_alignment(phys_addr_t p, size_t size)
+{
+	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
+
+	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
+}
+
 static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
 	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
 	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
-	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
 
 	next_bfn = pfn_to_bfn(xen_pfn);
 
-	/* If buffer is physically aligned, ensure DMA alignment. */
-	if (IS_ALIGNED(p, algn) &&
-	    !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
-		return 1;
-
 	for (i = 1; i < nr_pages; i++)
 		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
 			return 1;
@@ -111,7 +113,7 @@ static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
 }
 
 #ifdef CONFIG_X86
-int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+int __init xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
 	int rc;
 	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -156,7 +158,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
 
 	*dma_handle = xen_phys_to_dma(dev, phys);
 	if (*dma_handle + size - 1 > dma_mask ||
-	    range_straddles_page_boundary(phys, size)) {
+	    range_straddles_page_boundary(phys, size) ||
+	    range_requires_alignment(phys, size)) {
 		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
 				dma_handle) != 0)
 			goto out_free_pages;
@@ -182,7 +185,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
 	size = ALIGN(size, XEN_PAGE_SIZE);
 
 	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
-	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
+	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
+			 range_requires_alignment(phys, size)))
 		return;
 
 	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
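A note on the swiotlb-xen.c change above: the "physically aligned buffer must keep its alignment in machine (bus) address space" test is moved out of range_straddles_page_boundary() into a separate range_requires_alignment() helper, and this diff applies it only in the coherent alloc/free paths rather than everywhere the shared helper is called, which is the relaxation the 'xen/swiotlb: relax alignment requirements' patch title refers to. The following is a stand-alone toy, not driver code: PAGE_SHIFT, get_order(), IS_ALIGNED() and the pfn_to_bfn() mapping are simplified stand-ins, included only to show the arithmetic of the check.

/* Toy illustration of the factored-out alignment test.  All helpers
 * below are simplified stand-ins for the kernel definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

/* Toy pseudo-physical -> machine frame translation: pretend the
 * hypervisor handed out frames shifted by one page.
 */
static uint64_t pfn_to_bfn(uint64_t pfn)
{
	return pfn + 1;
}

static bool range_requires_alignment(uint64_t phys, size_t size)
{
	uint64_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
	uint64_t bus  = pfn_to_bfn(phys >> PAGE_SHIFT) << PAGE_SHIFT;

	/* Only flag the case where the caller had an aligned buffer and
	 * the machine address silently lost that alignment.
	 */
	return IS_ALIGNED(phys, algn) && !IS_ALIGNED(bus, algn);
}

int main(void)
{
	/* 2MB buffer at a 2MB-aligned pseudo-physical address: needs remap. */
	printf("%d\n", range_requires_alignment(0x200000, 2 << 20));  /* 1 */
	/* Page-sized buffer: the bus address stays page-aligned, no remap. */
	printf("%d\n", range_requires_alignment(0x201000, 4096));     /* 0 */
	return 0;
}

As in the driver, the required alignment is derived from the allocation size (the power of two covering it), so small buffers never demand large-granularity alignment of their machine addresses.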
