/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)
static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL is for allowing page used for DMA sending,
	 * which is the XDP_TX use-case.
	 */
	if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
	    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
		return -EINVAL;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* A driver calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}
struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);
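
/* Illustrative sketch, not part of the original file: a minimal
 * driver-side page_pool setup for a single RX queue.  The function
 * name, queue size and parameter choices below are assumptions for
 * the example, not requirements of the API.
 */
static __maybe_unused struct page_pool *
example_rx_page_pool_create(struct device *dev)
{
	struct page_pool_params pp_params = {
		.flags	   = PP_FLAG_DMA_MAP,	/* pool keeps the DMA mapping */
		.order	   = 0,			/* one order-0 page per RX frame */
		.pool_size = 256,		/* assumed RX ring size */
		.nid	   = NUMA_NO_NODE,
		.dev	   = dev,
		.dma_dir   = DMA_FROM_DEVICE,	/* RX only, no XDP_TX */
	};

	/* Returns a valid pool, or an ERR_PTR() on failure */
	return page_pool_create(&pp_params);
}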
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	bool refill = false;
	struct page *page;

	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
			/* Fast-path */
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
		refill = true;
	}

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Slow-path: Get page from locked ring queue,
	 * refill alloc array if requested.
	 */
	spin_lock(&r->consumer_lock);
	page = __ptr_ring_consume(r);
	if (refill)
		pool->alloc.count = __ptr_ring_consume_batched(r,
							pool->alloc.cache,
							PP_ALLOC_CACHE_REFILL);
	spin_unlock(&r->consumer_lock);

	return page;
}
static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* FUTURE development:
	 *
	 * Current slow-path essentially falls back to single page
	 * allocations, which doesn't improve performance.  This code
	 * needs bulk allocation support from the page allocator code.
	 */

	/* Cache was empty, do real allocation */
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (i.e. 32bit CPU with 64bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	page->dma_addr = dma;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

skip_dma_map:
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;

	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* A page just allocated should/must have refcnt == 1. */
	return page;
}
/* page_pool_alloc_pages() is meant to replace direct alloc_pages() API
 * calls, but it additionally provides a synchronization guarantee on
 * the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);

	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
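
/* Allocation can happen from softirq/NAPI context; RX fast-path callers
 * therefore typically pass GFP_ATOMIC (see the page_pool_dev_alloc_pages()
 * wrapper in net/page_pool.h), while GFP_KERNEL is only safe from process
 * context, e.g. when pre-filling the RX ring at setup time.
 */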
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))
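
/* Worked example of the wrap-around safety: after the hold counter has
 * wrapped, hold_cnt = 5 and release_cnt = 0xfffffffe gives
 * _distance(5, 0xfffffffe) = (s32)0x7 = 7 pages still in-flight, even
 * though 5 < 0xfffffffe when compared as plain u32 values.
 */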
static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}
/* Cleanup page_pool state from page */
static void __page_pool_clean_page(struct page_pool *pool,
				   struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_unmap;

	dma = page->dma_addr;
	/* DMA unmap */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);

skip_dma_unmap:
	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}
/* Unmap the page and clean our state */
void page_pool_unmap_page(struct page_pool *pool, struct page *page)
{
	/* When page is unmapped, this implies page will not be
	 * returned to page_pool.
	 */
	__page_pool_clean_page(pool, page);
}
EXPORT_SYMBOL(page_pool_unmap_page);
/* Return a page to the page allocator, cleaning up our state */
static void __page_pool_return_page(struct page_pool *pool, struct page *page)
{
	__page_pool_clean_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}
static bool __page_pool_recycle_into_ring(struct page_pool *pool,
					  struct page *page)
{
	int ret;

	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}
/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for the XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool __page_pool_recycle_direct(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}
/* A page is NOT reusable when:
 * 1) it was allocated while the system was under memory pressure
 *    (page_is_pfmemalloc()), or
 * 2) it belongs to a different NUMA node than pool->p.nid.
 *
 * To update pool->p.nid users must call page_pool_update_nid().
 */
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
{
	return !page_is_pfmemalloc(page) && page_to_nid(page) == pool->p.nid;
}
void __page_pool_put_page(struct page_pool *pool, struct page *page,
			  unsigned int dma_sync_size, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 */
	if (likely(page_ref_count(page) == 1 &&
		   pool_page_reusable(pool, page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_serving_softirq())
			if (__page_pool_recycle_direct(page, pool))
				return;

		if (!__page_pool_recycle_into_ring(pool, page)) {
			/* Cache full, fallback to free pages */
			__page_pool_return_page(pool, page);
		}
		return;
	}
	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once.  In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page().
	 */
	__page_pool_clean_page(pool, page);
	put_page(page);
}
EXPORT_SYMBOL(__page_pool_put_page);
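
/* Illustrative sketch, not part of the original file: how an XDP_DROP
 * style RX path could pair page_pool_alloc_pages() with
 * __page_pool_put_page().  The function name and the "drop every frame"
 * policy are hypothetical; a real driver would do this from its NAPI
 * poll handler after running its XDP program.
 */
static __maybe_unused void example_rx_drop_one(struct page_pool *pool)
{
	struct page *page;

	/* Refill from the pool; softirq context, so GFP_ATOMIC */
	page = page_pool_alloc_pages(pool, GFP_ATOMIC);
	if (!page)
		return;

	/* ... device DMAs a frame into the page, XDP verdict: DROP ... */

	/* Recycle straight into the alloc cache: allowed because this
	 * runs in NAPI/softirq context (allow_direct = true) and the
	 * driver holds the only reference (refcnt == 1).  Passing -1 as
	 * dma_sync_size means "sync up to pool->p.max_len" when
	 * PP_FLAG_DMA_SYNC_DEV is set.
	 */
	__page_pool_put_page(pool, page, -1, true);
}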
static void __page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		__page_pool_return_page(pool, page);
	}
}
static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}
static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		__page_pool_return_page(pool, page);
	}
}
static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	__page_pool_empty_ring(pool);
}
static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}
static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);

		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
}
void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
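
/* Shutdown ordering, summarized: the driver stops its RX queue, returns
 * every page it still holds (via page_pool_put_page() or
 * page_pool_unmap_page()), and only then calls page_pool_destroy().
 * Pages still in-flight, e.g. sitting in SKBs, keep the pool alive;
 * page_pool_release_retry() above re-checks the inflight count every
 * DEFER_TIME until it reaches zero and the pool can finally be freed.
 */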
/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;
}
EXPORT_SYMBOL(page_pool_update_nid);