sys/dev/drm/i915/i915_utils.h
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __I915_UTILS_H
#define __I915_UTILS_H
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif
#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long)(x), __func__)
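/*
 * Illustrative use of MISSING_CASE (not part of this header): flag an
 * enum value a switch statement was not taught to handle, e.g.
 *
 *	default:
 *		MISSING_CASE(engine->id);
 *		break;
 */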
#if GCC_VERSION >= 70000
#define add_overflows(A, B) \
	__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
#else
#define add_overflows(A, B) ({ \
	typeof(A) a = (A); \
	typeof(B) b = (B); \
	a + b < a; \
})
#endif
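/*
 * Illustrative use of add_overflows (not part of this header): reject a
 * size computation that would wrap around before acting on it, e.g.
 *
 *	if (add_overflows(obj->base.size, extra))
 *		return -EINVAL;
 */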
#define range_overflows(start, size, max) ({ \
	typeof(start) start__ = (start); \
	typeof(size) size__ = (size); \
	typeof(max) max__ = (max); \
	(void)(&start__ == &size__); \
	(void)(&start__ == &max__); \
	start__ > max__ || size__ > max__ - start__; \
})

#define range_overflows_t(type, start, size, max) \
	range_overflows((type)(start), (type)(size), (type)(max))
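/*
 * The dummy pointer comparisons in range_overflows() exist only so the
 * compiler warns when start, size and max have mismatched types.
 * Illustrative use (not part of this header):
 *
 *	if (range_overflows_t(u64, args->offset, args->size, obj->base.size))
 *		return -EINVAL;
 */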
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
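/*
 * Illustrative use of overflows_type (not part of this header): check
 * that a wide value still fits once stored in a narrower field, e.g.
 *
 *	if (overflows_type(vma->node.start, u32))
 *		return -E2BIG;
 */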
#define ptr_mask_bits(ptr, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))
#define ptr_unpack_bits(ptr, bits, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	*(bits) = __v & (BIT(n) - 1); \
	(typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_pack_bits(ptr, bits, n) \
	((typeof(ptr))((unsigned long)(ptr) | (bits)))
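/*
 * These helpers stash small flags in the low bits of a sufficiently
 * aligned pointer.  Illustrative round trip (not part of this header),
 * assuming the pointer is at least 4-byte aligned so 2 low bits are free:
 *
 *	unsigned long flags;
 *	void *packed = ptr_pack_bits(ptr, 0x3, 2);
 *	ptr = ptr_unpack_bits(packed, &flags, 2);	// flags == 0x3
 */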
#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
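/*
 * The page_*() variants require the pointer to be page aligned, which
 * leaves PAGE_SHIFT (typically 12) low bits free for flag storage.
 */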
#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)

#define fetch_and_zero(ptr) ({ \
	typeof(*ptr) __T = *(ptr); \
	*(ptr) = (typeof(*ptr))0; \
	__T; \
})
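/*
 * Illustrative use of fetch_and_zero (not part of this header): take
 * ownership of a pointer while clearing the original location in one
 * step.  Note the read-modify-write is not atomic:
 *
 *	struct i915_vma *vma = fetch_and_zero(&obj->vma);
 */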
static inline u64 ptr_to_u64(const void *ptr)
{
	return (uintptr_t)ptr;
}

#define u64_to_ptr(T, x) ({ \
	typecheck(u64, x); \
	(T *)(uintptr_t)(x); \
})
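/*
 * Illustrative round trip (not part of this header): typecheck() makes
 * u64_to_ptr() refuse any argument that is not exactly a u64, e.g.
 *
 *	u64 cookie = ptr_to_u64(rq);
 *	rq = u64_to_ptr(struct drm_i915_gem_request, cookie);
 */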
#define __mask_next_bit(mask) ({ \
	int __idx = ffs(mask) - 1; \
	mask &= ~BIT(__idx); \
	__idx; \
})
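/*
 * Illustrative use of __mask_next_bit (not part of this header): peel
 * off the set bits one at a time, lowest first; note that the macro
 * modifies its argument in place:
 *
 *	while (mask) {
 *		int idx = __mask_next_bit(mask);
 *		...
 *	}
 */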
#include <linux/list.h>

static inline void __list_del_many(struct list_head *head,
				   struct list_head *first)
{
	first->prev = head;
	WRITE_ONCE(head->next, first);
}
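/*
 * __list_del_many() unlinks every entry between @head and @first in one
 * go: afterwards @first is the first entry on the list.  The detached
 * entries' own pointers are left untouched, so only their owner may
 * safely reuse them.
 */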
/*
 * Wait until the work is finally complete, even if it tries to postpone
 * by requeueing itself. Note, that if the worker never cancels itself,
 * we will spin forever.
 */
static inline void drain_delayed_work(struct delayed_work *dw)
{
	do {
		while (flush_delayed_work(dw))
			;
	} while (delayed_work_pending(dw));
}
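/*
 * Illustrative use of drain_delayed_work (not part of this header):
 * drain a self-requeueing worker during teardown, e.g.
 *
 *	drain_delayed_work(&dev_priv->gt.retire_work);
 */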
#endif /* !__I915_UTILS_H */